diff --git a/.gitmodules b/.gitmodules index 0b0b1aa87c..c8bbca863d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -109,3 +109,6 @@ [submodule "src/dhcprelay"] path = src/dhcprelay url = https://github.com/sonic-net/sonic-dhcp-relay.git +[submodule "src/sonic-host-services"] + path = src/sonic-host-services + url = https://github.com/sonic-net/sonic-host-services diff --git a/src/sonic-host-services b/src/sonic-host-services new file mode 160000 index 0000000000..709046bbec --- /dev/null +++ b/src/sonic-host-services @@ -0,0 +1 @@ +Subproject commit 709046bbec9d05c9bf06e7c54a23ae0f9c970281 diff --git a/src/sonic-host-services/.gitignore b/src/sonic-host-services/.gitignore deleted file mode 100644 index e807a5b8f7..0000000000 --- a/src/sonic-host-services/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Python files -*.pyc -scripts/caclmgrdc -scripts/hostcfgdc -scripts/aaastatsdc -scripts/procdockerstatsdc - -# Generated by packaging -*.egg-info/ -.eggs/ -build/ -dist/ - -# Unit test coverage -.coverage -.pytest_cache/ -coverage.xml -htmlcov/ -test-results.xml - -# Unit test scratchpad -tests/hostcfgd/output/* diff --git a/src/sonic-host-services/__init__.py b/src/sonic-host-services/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/sonic-host-services/host_modules/host_service.py b/src/sonic-host-services/host_modules/host_service.py deleted file mode 100644 index 48f55ae062..0000000000 --- a/src/sonic-host-services/host_modules/host_service.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Base class for host modules""" - -import dbus.service -import dbus - -BUS_NAME_BASE = 'org.SONiC.HostService' -BUS_PATH = '/org/SONiC/HostService' - -def bus_name(mod_name): - """Return the bus name for the service""" - return BUS_NAME_BASE + '.' 
+ mod_name - -def bus_path(mod_name): - """Return the bus path for the service""" - return BUS_PATH + '/' + mod_name - -method = dbus.service.method - -class HostService(dbus.service.Object): - """Service class for top level DBus endpoint""" - def __init__(self, mod_name): - self.bus = dbus.SystemBus() - self.bus_name = dbus.service.BusName(BUS_NAME_BASE, self.bus) - super(HostService, self).__init__(self.bus_name, BUS_PATH) - -class HostModule(dbus.service.Object): - """Base class for all host modules""" - def __init__(self, mod_name): - self.bus = dbus.SystemBus() - self.bus_name = dbus.service.BusName(bus_name(mod_name), self.bus) - super(HostModule, self).__init__(self.bus_name, bus_path(mod_name)) - -def register(): - return HostService, "host_service" diff --git a/src/sonic-host-services/host_modules/showtech.py b/src/sonic-host-services/host_modules/showtech.py deleted file mode 100644 index 2b603d4f52..0000000000 --- a/src/sonic-host-services/host_modules/showtech.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Show techsupport command handler""" - -import host_service -import subprocess -import re - -MOD_NAME = 'showtech' - -class Showtech(host_service.HostModule): - """DBus endpoint that executes the "show techsupport" command - """ - @host_service.method(host_service.bus_name(MOD_NAME), in_signature='s', out_signature='is') - def info(self, date): - - ERROR_TAR_FAILED = 5 - ERROR_PROCFS_SAVE_FAILED = 6 - ERROR_INVALID_ARGUMENT = 10 - - err_dict = {ERROR_INVALID_ARGUMENT: 'Invalid input: Incorrect DateTime format', - ERROR_TAR_FAILED: 'Failure saving information into compressed output file', - ERROR_PROCFS_SAVE_FAILED: 'Saving of process information failed'} - - cmd = ['/usr/local/bin/generate_dump'] - if date: - cmd.append("-s") - cmd.append(date) - - try: - result = subprocess.run(cmd, capture_output=True, text=True, - check=True) - - except subprocess.CalledProcessError as err: - errmsg = err_dict.get(err.returncode) - - if errmsg is None: - output = 'Error: Failure code {:-5}'.format(err.returncode) - else: - output = errmsg - - print("%Error: Host side: Failed: " + str(err.returncode)) - return err.returncode, output - - output_file_match = re.search('\/var\/.*dump.*\.gz', result.stdout) - output_filename = output_file_match.group() - return result.returncode, output_filename - -def register(): - """Return the class name""" - return Showtech, MOD_NAME - diff --git a/src/sonic-host-services/pytest.ini b/src/sonic-host-services/pytest.ini deleted file mode 100644 index c4b03d4c34..0000000000 --- a/src/sonic-host-services/pytest.ini +++ /dev/null @@ -1,2 +0,0 @@ -[pytest] -addopts = --cov=scripts --cov-report html --cov-report term --cov-report xml --ignore=tests/*/test*_vectors.py diff --git a/src/sonic-host-services/scripts/aaastatsd b/src/sonic-host-services/scripts/aaastatsd deleted file mode 100755 index 2cc8f027e5..0000000000 --- a/src/sonic-host-services/scripts/aaastatsd +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/env python3 -# - -import os -import syslog -import threading -from swsscommon.swsscommon import ConfigDBConnector -from watchdog.observers import Observer -from watchdog.events import FileSystemEventHandler - -# FILE -RADIUS_PAM_AUTH_CONF_DIR = "/etc/pam_radius_auth.d/" -RADIUS_PAM_AUTH_CONF_STATS_DIR = "/etc/pam_radius_auth.d/statistics/" - -class RadiusCountersDbMon (threading.Thread): - def __init__(self, ID, name, radiusStatsInstance): - threading.Thread.__init__(self) - self.ID = ID - self.name = name - self.radiusStatsInstance = radiusStatsInstance - - def 
handle_CountersDbRadiusClear(self, key, data): - # print("RadiusCountersDbMon.handle_CountersDbRadiusClear()") - if key == 'clear': - self.radiusStatsInstance.handle_clear() - - def run(self): - # print("RadiusCountersDbMon.run()") - self.radiusStatsInstance.counters_db.subscribe('RADIUS', lambda table, key, data: self.handle_CountersDbRadiusClear(key, data)) - self.radiusStatsInstance.counters_db.listen() - # print("RadiusCountersDbMon.run(): After listen()") - - -class RadiusStatsFileHandler(FileSystemEventHandler): - def __init__(self, radiusStatsInstance): - self.radiusStatsInstance = radiusStatsInstance - - def on_any_event(self, event): - # print("RadiusStatsFileHandler.on_any_event()") - if event.is_directory: - return None - - self.radiusStatsInstance.handle_update(os.path.basename(event.src_path)) - -class RadiusStatsFileMon (): - def __init__(self, radiusStatsInstance): - self.event_handler = RadiusStatsFileHandler(radiusStatsInstance) - self.observer = Observer() - self.observer.schedule(self.event_handler, RADIUS_PAM_AUTH_CONF_STATS_DIR, recursive=False) - self.observer.start() - # print("RadiusStatsFileMon.__init__(): After observer.start()") - - def stop(self): - # print("RadiusStatsFileMon.stop()") - self.observer.stop() - # print("RadiusStatsFileMon.stop(): After observer.stop()") - self.observer.join() - # print("RadiusStatsFileMon.stop(): After observer.join()") - - -class RadiusStatistics: - def __init__(self, cfg_db, rad_global_conf, radius_conf): - - self.radius_counter_names = [ - "counter_0", - "access_requests", - "access_accepts", - "access_rejects", - "accounting_requests", - "accounting_responses", - "counter_6", - "counter_7", - "counter_8", - "counter_9", - "counter_10", - "access_challenges", - "counter_12", - "counter_13", - "counter_14", - "counter_15", - "counter_16", - "retried_access_requests", - "counter_18", - "counter_19", - "retried_accounting_requests", - "counter_21", - "counter_22", - "counter_23", - "counter_24", - "counter_25", - "counter_26", - "retried_access_challenges", - "counter_28", - "counter_29", - "counter_30", - "counter_31", - "timeouts", - "bad_authenticators", - "invalid_packets", - "counter_35", - ] - - self.radius_global = { - 'statistics': 'False' - } - - self.radius_servers = {} - - self.config_db = cfg_db - - for row in rad_global_conf: - self.radius_global_update(row, rad_global_conf[row]) - - for row in radius_conf: - self.radius_server_update(row, radius_conf[row]) - - - self.counters_db = ConfigDBConnector() - self.counters_db.db_connect('COUNTERS_DB', wait_for_init=False, - retry_on=True) - syslog.syslog(syslog.LOG_INFO, 'CountersDB connect success') - self.dbmon_thread = RadiusCountersDbMon("RadiusCountersDbMon", - "RadiusCountersDbMon", self) - self.dbmon_thread.daemon = True - self.dbmon_thread.start() - - self.filemon = RadiusStatsFileMon(self) - syslog.syslog(syslog.LOG_INFO, 'RADIUS Stats File Monitor started') - - def radius_global_update(self, key, data): - if key == 'global': - self.radius_global.update(data) - - for addr in self.radius_servers: - self.create_file(addr) - - def radius_server_update(self, key, data): - if data == {}: - if key in self.radius_servers: - del self.radius_servers[key] - else: - self.radius_servers[key] = data - - self.create_file(key) - - def create_file(self, addr): - # print( "RadiusStatistics.create_file({})".format(addr)) - stats_file = RADIUS_PAM_AUTH_CONF_STATS_DIR + addr - if self.radius_global['statistics'] == 'False': - if os.path.exists(stats_file): - os.unlink(stats_file) - 
else: - open(stats_file, 'a').close() - os.chmod(stats_file, 0o666) - self.handle_update(addr) - - def handle_clear(self): - # print( "RadiusStatistics.handle_clear()") - for filename in os.listdir(RADIUS_PAM_AUTH_CONF_STATS_DIR): - stats_file = RADIUS_PAM_AUTH_CONF_STATS_DIR + filename - open(stats_file, 'w').close() - - def handle_update(self, srv): - # print( "RadiusStatistics.handle_update({})".format(srv)) - if self.radius_global['statistics'] == 'False': - return - - stats_file = RADIUS_PAM_AUTH_CONF_STATS_DIR + srv - entry = None - if os.path.exists(stats_file): - with open(stats_file, 'r') as f: - lines = f.readlines() - if len(lines) > 0: - radius_counters = lines[0].split(' ') - entry = dict(zip(self.radius_counter_names, radius_counters)) - - counters_db = ConfigDBConnector() - counters_db.db_connect('COUNTERS_DB', wait_for_init=False, - retry_on=False) - - counters_db.set_entry('RADIUS_SERVER_STATS', srv, entry) - - counters_db.close(counters_db.COUNTERS_DB) - -class AAAStatsDaemon: - def __init__(self): - self.config_db = ConfigDBConnector() - self.config_db.connect(wait_for_init=True, retry_on=True) - syslog.syslog(syslog.LOG_INFO, 'ConfigDB connect success') - - radius_global = self.config_db.get_table('RADIUS') - radius_server = self.config_db.get_table('RADIUS_SERVER') - - self.radiusstats = RadiusStatistics(self.config_db, radius_global, - radius_server) - - def radius_global_handler(self, key, data): - self.radiusstats.radius_global_update(key, data) - - def radius_server_handler(self, key, data): - self.radiusstats.radius_server_update(key, data) - - def start(self): - self.config_db.subscribe('RADIUS_SERVER', - lambda table, key, data: self.radius_server_handler(key, data)) - self.config_db.subscribe('RADIUS', - lambda table, key, data: self.radius_global_handler(key, data)) - self.config_db.listen() - # print( "After config_db.listen()") - syslog.syslog(syslog.LOG_INFO, 'Stopping FileMon') - self.radiusstats.filemon.stop() - # print( "Exiting") - syslog.syslog(syslog.LOG_INFO, 'Exiting') - -def main(): - daemon = AAAStatsDaemon() - daemon.start() - - -if __name__ == "__main__": - main() - diff --git a/src/sonic-host-services/scripts/caclmgrd b/src/sonic-host-services/scripts/caclmgrd deleted file mode 100755 index 4af588e28d..0000000000 --- a/src/sonic-host-services/scripts/caclmgrd +++ /dev/null @@ -1,900 +0,0 @@ -#!/usr/bin/env python3 -# -# caclmgrd -# -# Control plane ACL manager daemon for SONiC -# -# Upon starting, this daemon reads control plane ACL tables and rules from -# Config DB, converts the rules into iptables rules and installs the iptables -# rules. The daemon then indefintely listens for notifications from Config DB -# and updates iptables rules if control plane ACL configuration has changed. -# - -try: - import ipaddress - import os - import subprocess - import sys - import threading - import time - - from sonic_py_common import daemon_base, device_info, multi_asic - from swsscommon import swsscommon -except ImportError as err: - raise ImportError("%s - required module not found" % str(err)) - -VERSION = "1.0" - -SYSLOG_IDENTIFIER = "caclmgrd" - -DEFAULT_NAMESPACE = '' - - -# ========================== Helper Functions ========================= - - -def _ip_prefix_in_key(key): - """ - Function to check if IP prefix is present in a Redis database key. - If it is present, then the key will be a tuple. Otherwise, the - key will be a string. 
- """ - return (isinstance(key, tuple)) - -# ============================== Classes ============================== - - -class ControlPlaneAclManager(daemon_base.DaemonBase): - """ - Class which reads control plane ACL tables and rules from Config DB, - translates them into equivalent iptables commands and runs those - commands in order to apply the control plane ACLs. - Attributes: - config_db: Handle to Config Redis database via SwSS SDK - """ - FEATURE_TABLE = "FEATURE" - ACL_TABLE = "ACL_TABLE" - ACL_RULE = "ACL_RULE" - DEVICE_METADATA_TABLE = "DEVICE_METADATA" - MUX_CABLE_TABLE = "MUX_CABLE_TABLE" - - ACL_TABLE_TYPE_CTRLPLANE = "CTRLPLANE" - - BFD_SESSION_TABLE = "BFD_SESSION_TABLE" - - # To specify a port range instead of a single port, use iptables format: - # separate start and end ports with a colon, e.g., "1000:2000" - ACL_SERVICES = { - "NTP": { - "ip_protocols": ["udp"], - "dst_ports": ["123"], - "multi_asic_ns_to_host_fwd":False - }, - "SNMP": { - "ip_protocols": ["tcp", "udp"], - "dst_ports": ["161"], - "multi_asic_ns_to_host_fwd":True - }, - "SSH": { - "ip_protocols": ["tcp"], - "dst_ports": ["22"], - "multi_asic_ns_to_host_fwd":True - }, - "ANY": { - "ip_protocols": ["any"], - "dst_ports": ["0"], - "multi_asic_ns_to_host_fwd":False - } - } - - UPDATE_DELAY_SECS = 0.5 - - DualToR = False - bfdAllowed = False - - def __init__(self, log_identifier): - super(ControlPlaneAclManager, self).__init__(log_identifier) - - # Update-thread-specific data per namespace - self.update_thread = {} - self.lock = {} - self.num_changes = {} - - # Initialize update-thread-specific data for default namespace - self.update_thread[DEFAULT_NAMESPACE] = None - self.lock[DEFAULT_NAMESPACE] = threading.Lock() - self.num_changes[DEFAULT_NAMESPACE] = 0 - - if device_info.is_multi_npu(): - swsscommon.SonicDBConfig.load_sonic_global_db_config() - - self.config_db_map = {} - self.iptables_cmd_ns_prefix = {} - self.config_db_map[DEFAULT_NAMESPACE] = swsscommon.ConfigDBConnector(use_unix_socket_path=True, namespace=DEFAULT_NAMESPACE) - self.config_db_map[DEFAULT_NAMESPACE].connect() - self.iptables_cmd_ns_prefix[DEFAULT_NAMESPACE] = "" - self.namespace_mgmt_ip = self.get_namespace_mgmt_ip(self.iptables_cmd_ns_prefix[DEFAULT_NAMESPACE], DEFAULT_NAMESPACE) - self.namespace_mgmt_ipv6 = self.get_namespace_mgmt_ipv6(self.iptables_cmd_ns_prefix[DEFAULT_NAMESPACE], DEFAULT_NAMESPACE) - self.namespace_docker_mgmt_ip = {} - self.namespace_docker_mgmt_ipv6 = {} - - # Get all features that are present {feature_name : True/False} - self.feature_present = {} - self.update_feature_present() - - metadata = self.config_db_map[DEFAULT_NAMESPACE].get_table(self.DEVICE_METADATA_TABLE) - if 'subtype' in metadata['localhost'] and metadata['localhost']['subtype'] == 'DualToR': - self.DualToR = True - - namespaces = multi_asic.get_all_namespaces() - - for front_asic_namespace in namespaces['front_ns']: - self.update_thread[front_asic_namespace] = None - self.lock[front_asic_namespace] = threading.Lock() - self.num_changes[front_asic_namespace] = 0 - - self.config_db_map[front_asic_namespace] = swsscommon.ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespace) - self.config_db_map[front_asic_namespace].connect() - self.iptables_cmd_ns_prefix[front_asic_namespace] = "ip netns exec " + front_asic_namespace + " " - self.namespace_docker_mgmt_ip[front_asic_namespace] = self.get_namespace_mgmt_ip(self.iptables_cmd_ns_prefix[front_asic_namespace], - front_asic_namespace) - 
self.namespace_docker_mgmt_ipv6[front_asic_namespace] = self.get_namespace_mgmt_ipv6(self.iptables_cmd_ns_prefix[front_asic_namespace], - front_asic_namespace) - - for back_asic_namespace in namespaces['back_ns']: - self.update_thread[back_asic_namespace] = None - self.lock[back_asic_namespace] = threading.Lock() - self.num_changes[back_asic_namespace] = 0 - - self.iptables_cmd_ns_prefix[back_asic_namespace] = "ip netns exec " + back_asic_namespace + " " - self.namespace_docker_mgmt_ip[back_asic_namespace] = self.get_namespace_mgmt_ip(self.iptables_cmd_ns_prefix[back_asic_namespace], - back_asic_namespace) - self.namespace_docker_mgmt_ipv6[back_asic_namespace] = self.get_namespace_mgmt_ipv6(self.iptables_cmd_ns_prefix[back_asic_namespace], - back_asic_namespace) - - def get_namespace_mgmt_ip(self, iptable_ns_cmd_prefix, namespace): - ip_address_get_command = iptable_ns_cmd_prefix + "ip -4 -o addr show " + ("eth0" if namespace else "docker0") +\ - " | awk '{print $4}' | cut -d'/' -f1 | head -1" - - return self.run_commands([ip_address_get_command]) - - def get_namespace_mgmt_ipv6(self, iptable_ns_cmd_prefix, namespace): - ipv6_address_get_command = iptable_ns_cmd_prefix + "ip -6 -o addr show scope global " + ("eth0" if namespace else "docker0") +\ - " | awk '{print $4}' | cut -d'/' -f1 | head -1" - return self.run_commands([ipv6_address_get_command]) - - def run_commands(self, commands): - """ - Given a list of shell commands, run them in order - Args: - commands: List of strings, each string is a shell command - """ - for cmd in commands: - proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE) - - (stdout, stderr) = proc.communicate() - - if proc.returncode != 0: - self.log_error("Error running command '{}'".format(cmd)) - elif stdout: - return stdout.rstrip('\n') - return "" - - def parse_int_to_tcp_flags(self, hex_value): - tcp_flags_str = "" - if hex_value & 0x01: - tcp_flags_str += "FIN," - if hex_value & 0x02: - tcp_flags_str += "SYN," - if hex_value & 0x04: - tcp_flags_str += "RST," - if hex_value & 0x08: - tcp_flags_str += "PSH," - if hex_value & 0x10: - tcp_flags_str += "ACK," - if hex_value & 0x20: - tcp_flags_str += "URG," - # iptables doesn't handle the flags below now. It has some special keys for it: - # --ecn-tcp-cwr This matches if the TCP ECN CWR (Congestion Window Received) bit is set. - # --ecn-tcp-ece This matches if the TCP ECN ECE (ECN Echo) bit is set. 
- # if hex_value & 0x40: - # tcp_flags_str += "ECE," - # if hex_value & 0x80: - # tcp_flags_str += "CWR," - - # Delete the trailing comma - tcp_flags_str = tcp_flags_str[:-1] - return tcp_flags_str - - def update_feature_present(self): - feature_tb_info = self.config_db_map[DEFAULT_NAMESPACE].get_table(self.FEATURE_TABLE) - if feature_tb_info: - for k, v in feature_tb_info.items(): - self.feature_present[k] = True - - def generate_block_ip2me_traffic_iptables_commands(self, namespace): - INTERFACE_TABLE_NAME_LIST = [ - "LOOPBACK_INTERFACE", - "MGMT_INTERFACE", - "VLAN_INTERFACE", - "PORTCHANNEL_INTERFACE", - "INTERFACE" - ] - - block_ip2me_cmds = [] - - # Add iptables rules to drop all packets destined for peer-to-peer interface IP addresses - for iface_table_name in INTERFACE_TABLE_NAME_LIST: - iface_table = self.config_db_map[namespace].get_table(iface_table_name) - if iface_table: - for key, _ in iface_table.items(): - if not _ip_prefix_in_key(key): - continue - - iface_name, iface_cidr = key - ip_ntwrk = ipaddress.ip_network(iface_cidr, strict=False) - - # For VLAN interfaces, the IP address we want to block is the default gateway (i.e., - # the first available host IP address of the VLAN subnet) - ip_addr = next(ip_ntwrk.hosts()) if iface_table_name == "VLAN_INTERFACE" else ip_ntwrk.network_address - - if isinstance(ip_ntwrk, ipaddress.IPv4Network): - block_ip2me_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -d {}/{} -j DROP".format(ip_addr, ip_ntwrk.max_prefixlen)) - elif isinstance(ip_ntwrk, ipaddress.IPv6Network): - block_ip2me_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -d {}/{} -j DROP".format(ip_addr, ip_ntwrk.max_prefixlen)) - else: - self.log_warning("Unrecognized IP address type on interface '{}': {}".format(iface_name, ip_ntwrk)) - - return block_ip2me_cmds - - def generate_allow_internal_docker_ip_traffic_commands(self, namespace): - allow_internal_docker_ip_cmds = [] - - if namespace: - # For namespace docker allow local communication on docker management ip for all proto - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format - (self.namespace_docker_mgmt_ip[namespace], self.namespace_docker_mgmt_ip[namespace])) - - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format - (self.namespace_docker_mgmt_ipv6[namespace], self.namespace_docker_mgmt_ipv6[namespace])) - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format - (self.namespace_mgmt_ip, self.namespace_docker_mgmt_ip[namespace])) - - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format - (self.namespace_mgmt_ipv6, self.namespace_docker_mgmt_ipv6[namespace])) - - else: - - # Also host namespace communication on docker bridge on multi-asic. 
- if self.namespace_docker_mgmt_ip: - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format - (self.namespace_mgmt_ip, self.namespace_mgmt_ip)) - - if self.namespace_docker_mgmt_ipv6: - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format - (self.namespace_mgmt_ipv6, self.namespace_mgmt_ipv6)) - # In host allow all tcp/udp traffic from namespace docker eth0 management ip to host docker bridge - for docker_mgmt_ip in list(self.namespace_docker_mgmt_ip.values()): - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format - (docker_mgmt_ip, self.namespace_mgmt_ip)) - - for docker_mgmt_ipv6 in list(self.namespace_docker_mgmt_ipv6.values()): - allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format - (docker_mgmt_ipv6, self.namespace_mgmt_ipv6)) - - return allow_internal_docker_ip_cmds - - def generate_fwd_traffic_from_namespace_to_host_commands(self, namespace, acl_source_ip_map): - """ - The below SNAT and DNAT rules are added in asic namespace in multi-ASIC platforms. It helps to forward request coming - in through the front panel interfaces created/present in the asic namespace for the servie running in linux host network namespace. - The external IP addresses are NATed to the internal docker IP addresses for the Host service to respond. - """ - - if not namespace: - return [] - - fwd_traffic_from_namespace_to_host_cmds = [] - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -t nat -X") - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -t nat -F") - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -t nat -X") - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -t nat -F") - - for acl_service in self.ACL_SERVICES: - if self.ACL_SERVICES[acl_service]["multi_asic_ns_to_host_fwd"]: - # Get the Source IP Set if exists else use default source ip prefix - nat_source_ipv4_set = acl_source_ip_map[acl_service]["ipv4"] if acl_source_ip_map and acl_source_ip_map[acl_service]["ipv4"] else { "0.0.0.0/0" } - nat_source_ipv6_set = acl_source_ip_map[acl_service]["ipv6"] if acl_source_ip_map and acl_source_ip_map[acl_service]["ipv6"] else { "::/0" } - - for ip_protocol in self.ACL_SERVICES[acl_service]["ip_protocols"]: - for dst_port in self.ACL_SERVICES[acl_service]["dst_ports"]: - for ipv4_src_ip in nat_source_ipv4_set: - # IPv4 rules - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + - "iptables -t nat -A PREROUTING -p {} -s {} --dport {} -j DNAT --to-destination {}".format - (ip_protocol, ipv4_src_ip, dst_port, - self.namespace_mgmt_ip)) - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + - "iptables -t nat -A POSTROUTING -p {} -s {} --dport {} -j SNAT --to-source {}".format - (ip_protocol, ipv4_src_ip, dst_port, - self.namespace_docker_mgmt_ip[namespace])) - for ipv6_src_ip in nat_source_ipv6_set: - # IPv6 rules - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + - "ip6tables -t nat -A PREROUTING -p {} -s {} --dport {} -j DNAT --to-destination {}".format - (ip_protocol, ipv6_src_ip, dst_port, - 
self.namespace_mgmt_ipv6)) - fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + - "ip6tables -t nat -A POSTROUTING -p {} -s {} --dport {} -j SNAT --to-source {}".format - (ip_protocol,ipv6_src_ip, dst_port, - self.namespace_docker_mgmt_ipv6[namespace])) - - return fwd_traffic_from_namespace_to_host_cmds - - def is_rule_ipv4(self, rule_props): - if (("SRC_IP" in rule_props and rule_props["SRC_IP"]) or - ("DST_IP" in rule_props and rule_props["DST_IP"])): - return True - else: - return False - - def is_rule_ipv6(self, rule_props): - if (("SRC_IPV6" in rule_props and rule_props["SRC_IPV6"]) or - ("DST_IPV6" in rule_props and rule_props["DST_IPV6"])): - return True - else: - return False - - def setup_dhcp_chain(self, namespace): - all_chains = self.get_chain_list(self.iptables_cmd_ns_prefix[namespace], [""]) - dhcp_chain_exist = "DHCP" in all_chains - - iptables_cmds = [] - if dhcp_chain_exist: - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -F DHCP") - self.log_info("DHCP chain exists, flush") - else: - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -N DHCP") - self.log_info("DHCP chain does not exist, create") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A DHCP -j RETURN") - - self.log_info("Issuing the following iptables commands for DHCP chain:") - for cmd in iptables_cmds: - self.log_info(" " + cmd) - - self.run_commands(iptables_cmds) - - def get_chain_list(self, iptable_ns_cmd_prefix, exclude_list): - command = iptable_ns_cmd_prefix + "iptables -L -v -n | grep Chain | awk '{print $2}'" - chain_list = self.run_commands([command]).splitlines() - - for chain in exclude_list: - if chain in chain_list: - chain_list.remove(chain) - - return chain_list - - def dhcp_acl_rule(self, iptable_ns_cmd_prefix, op, intf, mark): - ''' - sample: iptables --insert/delete/check DHCP -m physdev --physdev-in Ethernet4 -j DROP - sample: iptables --insert/delete/check DHCP -m mark --mark 0x67004 -j DROP - ''' - if mark is None: - return iptable_ns_cmd_prefix + 'iptables --{} DHCP -m physdev --physdev-in {} -j DROP'.format(op, intf) - else: - return iptable_ns_cmd_prefix + 'iptables --{} DHCP -m mark --mark {} -j DROP'.format(op, mark) - - def update_dhcp_chain(self, op, intf, mark): - for namespace in list(self.config_db_map.keys()): - check_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "check", intf, mark) - update_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], op, intf, mark) - - execute = 0 - ret = subprocess.call(check_cmd, shell=True) # ret==0 indicates the rule exists - - if op == "insert" and ret == 1: - execute = 1 - if op == "delete" and ret == 0: - execute = 1 - - if execute == 1: - subprocess.call(update_cmd, shell=True) - self.log_info("Update DHCP chain: {}".format(update_cmd)) - - def update_dhcp_acl(self, key, op, data, mark): - if "state" not in data: - self.log_warning("Unexpected update in MUX_CABLE_TABLE") - return - - intf = key - state = data["state"] - - if state == "active": - self.update_dhcp_chain("delete", intf, mark) - elif state == "standby": - self.update_dhcp_chain("insert", intf, mark) - elif state == "unknown": - self.update_dhcp_chain("delete", intf, mark) - elif state == "error": - self.log_warning("Cable state shows error") - else: - self.log_warning("Unexpected cable state") - - def update_dhcp_acl_for_mark_change(self, key, pre_mark, cur_mark): - for namespace in list(self.config_db_map.keys()): - check_cmd = 
self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "check", key, pre_mark) - - ret = subprocess.call(check_cmd, shell=True) # ret==0 indicates the rule exists - - '''update only when the rule with pre_mark exists''' - if ret == 0: - delete_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "delete", key, pre_mark) - insert_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "insert", key, cur_mark) - - subprocess.call(delete_cmd, shell=True) - self.log_info("Update DHCP chain: {}".format(delete_cmd)) - subprocess.call(insert_cmd, shell=True) - self.log_info("Update DHCP chain: {}".format(insert_cmd)) - - def get_acl_rules_and_translate_to_iptables_commands(self, namespace): - """ - Retrieves current ACL tables and rules from Config DB, translates - control plane ACLs into a list of iptables commands that can be run - in order to install ACL rules. - Returns: - A list of strings, each string is an iptables shell command - """ - iptables_cmds = [] - service_to_source_ip_map = {} - - # First, add iptables commands to set default policies to accept all - # traffic. In case we are connected remotely, the connection will not - # drop when we flush the current rules - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -P INPUT ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -P FORWARD ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -P OUTPUT ACCEPT") - - # Add iptables command to flush the current rules and delete all non-default chains - chain_list = self.get_chain_list(self.iptables_cmd_ns_prefix[namespace], ["DHCP"] if self.DualToR else [""]) - for chain in chain_list: - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -F " + chain) - if chain not in ["INPUT", "FORWARD", "OUTPUT"]: - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -X " + chain) - - # Add same set of commands for ip6tables - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -P INPUT ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -P FORWARD ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -P OUTPUT ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -F") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -X") - - # Add iptables/ip6tables commands to allow all traffic from localhost - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s 127.0.0.1 -i lo -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s ::1 -i lo -j ACCEPT") - - # Add iptables commands to allow internal docker traffic - iptables_cmds += self.generate_allow_internal_docker_ip_traffic_commands(namespace) - - # Add iptables/ip6tables commands to allow all incoming packets from established - # connections or new connections which are related to established connections - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT") - - # Add iptables/ip6tables commands to allow bidirectional ICMPv4 ping and traceroute - # TODO: Support processing ICMPv4 service ACL rules, and remove this blanket acceptance - 
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type destination-unreachable -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type time-exceeded -j ACCEPT") - - # Add iptables/ip6tables commands to allow bidirectional ICMPv6 ping and traceroute - # TODO: Support processing ICMPv6 service ACL rules, and remove this blanket acceptance - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type echo-request -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type echo-reply -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT") - - # Add iptables/ip6tables commands to allow all incoming Neighbor Discovery Protocol (NDP) NS/NA/RS/RA messages - # TODO: Support processing NDP service ACL rules, and remove this blanket acceptance - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type neighbor-solicitation -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type neighbor-advertisement -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type router-solicitation -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type router-advertisement -j ACCEPT") - - # Add iptables commands to link the DCHP chain to block dhcp packets based on ingress interfaces - if self.DualToR: - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p udp --dport 67 -j DHCP") - - # Add iptables/ip6tables commands to allow all incoming IPv4 DHCP packets - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p udp --dport 67:68 -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p udp --dport 67:68 -j ACCEPT") - - # Add iptables/ip6tables commands to allow all incoming IPv6 DHCP packets - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p udp --dport 546:547 -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p udp --dport 546:547 -j ACCEPT") - - # Add iptables/ip6tables commands to allow all incoming BGP traffic - # TODO: Determine BGP ACLs based on configured device sessions, and remove this blanket acceptance - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p tcp --dport 179 -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p tcp --dport 179 -j ACCEPT") - - # Get current ACL tables and rules from Config DB - self._tables_db_info = self.config_db_map[namespace].get_table(self.ACL_TABLE) - self._rules_db_info = self.config_db_map[namespace].get_table(self.ACL_RULE) - - num_ctrl_plane_acl_rules = 0 - - # Walk the ACL tables - for 
(table_name, table_data) in self._tables_db_info.items(): - - table_ip_version = None - - # Ignore non-control-plane ACL tables - if table_data["type"] != self.ACL_TABLE_TYPE_CTRLPLANE: - continue - - acl_services = table_data["services"] - - for acl_service in acl_services: - if acl_service not in self.ACL_SERVICES: - self.log_warning("Ignoring control plane ACL '{}' with unrecognized service '{}'" - .format(table_name, acl_service)) - continue - - self.log_info("Translating ACL rules for control plane ACL '{}' (service: '{}')" - .format(table_name, acl_service)) - - # Obtain default IP protocol(s) and destination port(s) for this service - ip_protocols = self.ACL_SERVICES[acl_service]["ip_protocols"] - dst_ports = self.ACL_SERVICES[acl_service]["dst_ports"] - - acl_rules = {} - - for ((rule_table_name, rule_id), rule_props) in self._rules_db_info.items(): - rule_props = {k.upper(): v for k,v in rule_props.items()} - if rule_table_name == table_name: - if not rule_props: - self.log_warning("rule_props for rule_id {} empty or null!".format(rule_id)) - continue - - try: - acl_rules[rule_props["PRIORITY"]] = rule_props - except KeyError: - self.log_error("rule_props for rule_id {} does not have key 'PRIORITY'!".format(rule_id)) - continue - - # If we haven't determined the IP version for this ACL table yet, - # try to do it now. We attempt to determine heuristically based on - # whether the src or dst IP of this rule is an IPv4 or IPv6 address. - if not table_ip_version: - if self.is_rule_ipv6(rule_props): - table_ip_version = 6 - elif self.is_rule_ipv4(rule_props): - table_ip_version = 4 - - if (self.is_rule_ipv6(rule_props) and (table_ip_version == 4)): - self.log_error("CtrlPlane ACL table {} is a IPv4 based table and rule {} is a IPV6 rule! Ignoring rule." - .format(table_name, rule_id)) - acl_rules.pop(rule_props["PRIORITY"]) - elif (self.is_rule_ipv4(rule_props) and (table_ip_version == 6)): - self.log_error("CtrlPlane ACL table {} is a IPv6 based table and rule {} is a IPV4 rule! Ignroing rule." - .format(table_name, rule_id)) - acl_rules.pop(rule_props["PRIORITY"]) - - # If we were unable to determine whether this ACL table contains - # IPv4 or IPv6 rules, log a message and skip processing this table. - if not table_ip_version: - self.log_warning("Unable to determine if ACL table '{}' contains IPv4 or IPv6 rules. Skipping table..." - .format(table_name)) - continue - ipv4_src_ip_set = set() - ipv6_src_ip_set = set() - # For each ACL rule in this table (in descending order of priority) - for priority in sorted(iter(acl_rules.keys()), reverse=True): - rule_props = acl_rules[priority] - - if "PACKET_ACTION" not in rule_props: - self.log_error("ACL rule does not contain PACKET_ACTION property") - continue - - # Apply the rule to the default protocol(s) for this ACL service - for ip_protocol in ip_protocols: - for dst_port in dst_ports: - rule_cmd = "ip6tables" if table_ip_version == 6 else "iptables" - - rule_cmd += " -A INPUT" - if ip_protocol != "any": - rule_cmd += " -p {}".format(ip_protocol) - - if "SRC_IPV6" in rule_props and rule_props["SRC_IPV6"]: - rule_cmd += " -s {}".format(rule_props["SRC_IPV6"]) - if rule_props["PACKET_ACTION"] == "ACCEPT": - ipv6_src_ip_set.add(rule_props["SRC_IPV6"]) - elif "SRC_IP" in rule_props and rule_props["SRC_IP"]: - rule_cmd += " -s {}".format(rule_props["SRC_IP"]) - if rule_props["PACKET_ACTION"] == "ACCEPT": - ipv4_src_ip_set.add(rule_props["SRC_IP"]) - - # Destination port 0 is reserved/unused port, so, using it to apply the rule to all ports. 
- if dst_port != "0": - rule_cmd += " --dport {}".format(dst_port) - - # If there are TCP flags present and ip protocol is TCP, append them - if ip_protocol == "tcp" and "TCP_FLAGS" in rule_props and rule_props["TCP_FLAGS"]: - tcp_flags, tcp_flags_mask = rule_props["TCP_FLAGS"].split("/") - - tcp_flags = int(tcp_flags, 16) - tcp_flags_mask = int(tcp_flags_mask, 16) - - if tcp_flags_mask > 0: - rule_cmd += " --tcp-flags {mask} {flags}".format(mask=self.parse_int_to_tcp_flags(tcp_flags_mask), flags=self.parse_int_to_tcp_flags(tcp_flags)) - - # Append the packet action as the jump target - rule_cmd += " -j {}".format(rule_props["PACKET_ACTION"]) - - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + rule_cmd) - num_ctrl_plane_acl_rules += 1 - - - service_to_source_ip_map.update({ acl_service:{ "ipv4":ipv4_src_ip_set, "ipv6":ipv6_src_ip_set } }) - - # Add iptables commands to block ip2me traffic - iptables_cmds += self.generate_block_ip2me_traffic_iptables_commands(namespace) - - # Add iptables/ip6tables commands to allow all incoming packets with TTL of 0 or 1 - # This allows the device to respond to tools like tcptraceroute - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -m ttl --ttl-lt 2 -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p tcp -m hl --hl-lt 2 -j ACCEPT") - - # Finally, if the device has control plane ACLs configured, - # add iptables/ip6tables commands to drop all other incoming packets - if num_ctrl_plane_acl_rules > 0: - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -j DROP") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -j DROP") - - return iptables_cmds, service_to_source_ip_map - - def update_control_plane_acls(self, namespace): - """ - Convenience wrapper which retrieves current ACL tables and rules from - Config DB, translates control plane ACLs into a list of iptables - commands and runs them. - """ - iptables_cmds, service_to_source_ip_map = self.get_acl_rules_and_translate_to_iptables_commands(namespace) - self.log_info("Issuing the following iptables commands:") - for cmd in iptables_cmds: - self.log_info(" " + cmd) - - self.run_commands(iptables_cmds) - - self.update_control_plane_nat_acls(namespace, service_to_source_ip_map) - - def update_control_plane_nat_acls(self, namespace, service_to_source_ip_map): - """ - Convenience wrapper for multi-asic platforms - which programs the NAT rules for redirecting the - traffic coming on the front panel interface map to namespace - to the host. - """ - # Add iptables commands to allow front panel traffic - iptables_cmds = self.generate_fwd_traffic_from_namespace_to_host_commands(namespace, service_to_source_ip_map) - - self.log_info("Issuing the following iptables commands:") - for cmd in iptables_cmds: - self.log_info(" " + cmd) - - self.run_commands(iptables_cmds) - - def check_and_update_control_plane_acls(self, namespace, num_changes): - """ - This function is intended to be spawned in a separate thread. - Its purpose is to prevent unnecessary iptables updates if we receive - multiple rapid ACL table update notifications. It sleeps for UPDATE_DELAY_SECS - then checks if any more ACL table updates were received in that window. If new - updates were received, it will sleep again and repeat the process until no - updates were received during the delay window, at which point it will update - iptables using the current ACL rules. 
- """ - while True: - # Sleep for our delay interval - time.sleep(self.UPDATE_DELAY_SECS) - - with self.lock[namespace]: - if self.num_changes[namespace] > num_changes: - # More ACL table changes occurred since this thread was spawned - # spawn a new thread with the current number of changes - new_changes = self.num_changes[namespace] - num_changes - self.log_info("ACL config not stable for namespace '{}': {} changes detected in the past {} seconds. Skipping update ..." - .format(namespace, new_changes, self.UPDATE_DELAY_SECS)) - num_changes = self.num_changes[namespace] - else: - if num_changes == self.num_changes[namespace] and num_changes > 0: - self.log_info("ACL config for namespace '{}' has not changed for {} seconds. Applying updates ..." - .format(namespace, self.UPDATE_DELAY_SECS)) - self.update_control_plane_acls(namespace) - else: - self.log_error("Error updating ACLs for namespace '{}'".format(namespace)) - - # Re-initialize - self.num_changes[namespace] = 0 - self.update_thread[namespace] = None - return - - def allow_bfd_protocol(self, namespace): - iptables_cmds = [] - # Add iptables/ip6tables commands to allow all BFD singlehop and multihop sessions - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT") - iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT") - self.run_commands(iptables_cmds) - - def run(self): - # Set select timeout to 1 second - SELECT_TIMEOUT_MS = 1000 - - self.log_info("Starting up ...") - - if not os.geteuid() == 0: - self.log_error("Must be root to run this daemon") - print("Error: Must be root to run this daemon") - sys.exit(1) - - # Initlaize Global config that loads all database*.json - if device_info.is_multi_npu(): - swsscommon.SonicDBConfig.initializeGlobalConfig() - - # Create the Select object - sel = swsscommon.Select() - - # Set up STATE_DB connector to monitor the change in MUX_CABLE_TABLE - state_db_connector = None - subscribe_mux_cable = None - subscribe_dhcp_packet_mark = None - state_db_id = swsscommon.SonicDBConfig.getDbId("STATE_DB") - dhcp_packet_mark_tbl = {} - - # set up state_db connector - state_db_connector = swsscommon.DBConnector("STATE_DB", 0) - - if self.DualToR: - self.log_info("Dual ToR mode") - - subscribe_mux_cable = swsscommon.SubscriberStateTable(state_db_connector, self.MUX_CABLE_TABLE) - sel.addSelectable(subscribe_mux_cable) - - subscribe_dhcp_packet_mark = swsscommon.SubscriberStateTable(state_db_connector, "DHCP_PACKET_MARK") - sel.addSelectable(subscribe_dhcp_packet_mark) - - # create DHCP chain - for namespace in list(self.config_db_map.keys()): - self.setup_dhcp_chain(namespace) - - # This should be migrated from state_db BFD session table to feature_table in the future when feature table support gets added for BFD - subscribe_bfd_session = swsscommon.SubscriberStateTable(state_db_connector, self.BFD_SESSION_TABLE) - sel.addSelectable(subscribe_bfd_session) - - # Map of Namespace <--> susbcriber table's object - config_db_subscriber_table_map = {} - - # Loop through all asic namespaces (if present) and host namespace (DEFAULT_NAMESPACE) - for namespace in list(self.config_db_map.keys()): - # Unconditionally update control plane ACLs once at start on given namespace - self.update_control_plane_acls(namespace) - # Connect to Config DB of given namespace - acl_db_connector = swsscommon.DBConnector("CONFIG_DB", 0, False, namespace) - # Subscribe to 
notifications when ACL tables changes - subscribe_acl_table = swsscommon.SubscriberStateTable(acl_db_connector, swsscommon.CFG_ACL_TABLE_TABLE_NAME) - # Subscribe to notifications when ACL rule tables changes - subscribe_acl_rule_table = swsscommon.SubscriberStateTable(acl_db_connector, swsscommon.CFG_ACL_RULE_TABLE_NAME) - # Add both tables to the selectable object - sel.addSelectable(subscribe_acl_table) - sel.addSelectable(subscribe_acl_rule_table) - # Update the map - config_db_subscriber_table_map[namespace] = [] - config_db_subscriber_table_map[namespace].append(subscribe_acl_table) - config_db_subscriber_table_map[namespace].append(subscribe_acl_rule_table) - - # Get the ACL rule table seprator - acl_rule_table_seprator = subscribe_acl_rule_table.getTableNameSeparator() - - # Loop on select to see if any event happen on state db or config db of any namespace - while True: - (state, selectableObj) = sel.select(SELECT_TIMEOUT_MS) - # Continue if select is timeout or selectable object is not return - if state != swsscommon.Select.OBJECT: - continue - - # Get the redisselect object from selectable object - redisSelectObj = swsscommon.CastSelectableToRedisSelectObj(selectableObj) - - # Get the corresponding namespace and db_id from redisselect - namespace = redisSelectObj.getDbConnector().getNamespace() - db_id = redisSelectObj.getDbConnector().getDbId() - - if db_id == state_db_id: - while True: - key, op, fvs = subscribe_bfd_session.pop() - if not key: - break - - if op == 'SET' and not self.bfdAllowed: - self.allow_bfd_protocol(namespace) - self.bfdAllowed = True - sel.removeSelectable(subscribe_bfd_session) - - if self.DualToR: - '''dhcp packet mark update''' - while True: - key, op, fvs = subscribe_dhcp_packet_mark.pop() - if not key: - break - self.log_info("dhcp packet mark update : '%s'" % str((key, op, fvs))) - - '''initial value is None''' - pre_mark = None if key not in dhcp_packet_mark_tbl else dhcp_packet_mark_tbl[key] - cur_mark = None if op == 'DEL' else dict(fvs)['mark'] - dhcp_packet_mark_tbl[key] = cur_mark - self.update_dhcp_acl_for_mark_change(key, pre_mark, cur_mark) - - '''mux cable update''' - while True: - key, op, fvs = subscribe_mux_cable.pop() - if not key: - break - self.log_info("mux cable update : '%s'" % str((key, op, fvs))) - - mark = None if key not in dhcp_packet_mark_tbl else dhcp_packet_mark_tbl[key] - self.update_dhcp_acl(key, op, dict(fvs), mark) - continue - - ctrl_plane_acl_notification = set() - - # Pop data of both Subscriber Table object of namespace that got config db acl table event - for table in config_db_subscriber_table_map[namespace]: - while True: - (key, op, fvp) = table.pop() - # Pop of table that does not have data so break - if key == '': - break - # ACL Table notification. 
We will take Control Plane ACTION for any ACL Table Event - # This can be optimize further but we should not have many acl table set/del events in normal - # scenario - if acl_rule_table_seprator not in key: - ctrl_plane_acl_notification.add(namespace) - # Check ACL Rule notification and make sure Rule point to ACL Table which is Controlplane - else: - acl_table = key.split(acl_rule_table_seprator)[0] - if self.config_db_map[namespace].get_table(self.ACL_TABLE)[acl_table]["type"] == self.ACL_TABLE_TYPE_CTRLPLANE: - ctrl_plane_acl_notification.add(namespace) - - # Update the Control Plane ACL of the namespace that got config db acl table event - for namespace in ctrl_plane_acl_notification: - with self.lock[namespace]: - if self.num_changes[namespace] == 0: - self.log_info("ACL change detected for namespace '{}'".format(namespace)) - - # Increment the number of change events we've received for this namespace - self.num_changes[namespace] += 1 - - # If an update thread is not already spawned for the namespace which we received - # the ACL table update event, spawn one now - if not self.update_thread[namespace]: - self.log_info("Spawning ACL update thread for namepsace '{}' ...".format(namespace)) - self.update_thread[namespace] = threading.Thread(target=self.check_and_update_control_plane_acls, - args=(namespace, self.num_changes[namespace])) - self.update_thread[namespace].start() - -# ============================= Functions ============================= - - -def main(): - # Instantiate a ControlPlaneAclManager object - caclmgr = ControlPlaneAclManager(SYSLOG_IDENTIFIER) - - # Log all messages from INFO level and higher - caclmgr.set_min_log_priority_info() - - caclmgr.run() - - -if __name__ == "__main__": - main() diff --git a/src/sonic-host-services/scripts/determine-reboot-cause b/src/sonic-host-services/scripts/determine-reboot-cause deleted file mode 100755 index 1408ad0e29..0000000000 --- a/src/sonic-host-services/scripts/determine-reboot-cause +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/env python3 -# -# determine-reboot-cause -# -# Program designed to run once, soon after system boot which will -# determine the cause of the previous reboot and store it to the disk, -# - -try: - import datetime - import json - import os - import pwd - import re - import sys - - from sonic_py_common import device_info, logger - -except ImportError as err: - raise ImportError("%s - required module not found" % str(err)) - -VERSION = "1.0" - -SYSLOG_IDENTIFIER = "determine-reboot-cause" - -REBOOT_CAUSE_DIR = "/host/reboot-cause/" -REBOOT_CAUSE_HISTORY_DIR = "/host/reboot-cause/history/" -REBOOT_CAUSE_FILE = os.path.join(REBOOT_CAUSE_DIR, "reboot-cause.txt") -PREVIOUS_REBOOT_CAUSE_FILE = os.path.join(REBOOT_CAUSE_DIR, "previous-reboot-cause.json") -FIRST_BOOT_PLATFORM_FILE = "/tmp/notify_firstboot_to_platform" -REBOOT_TYPE_KEXEC_FILE = "/proc/cmdline" -# The following SONIC_BOOT_TYPEs come from the warm/fast reboot script which is in sonic-utilities -# Because the system can be rebooted from some old versions, we have to take all possible BOOT options into consideration. 
-# On 201803, 201807 we have -# BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') fast-reboot" -# On 201811 and later we have -# BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" where BOOT_TYPE_ARG can be warm, fastfast or fast -# To extract the commom part of them, we should have the following PATTERN -REBOOT_TYPE_KEXEC_PATTERN_WARM = ".*SONIC_BOOT_TYPE=(warm|fastfast).*" -REBOOT_TYPE_KEXEC_PATTERN_FAST = ".*SONIC_BOOT_TYPE=(fast|fast-reboot).*" - -REBOOT_CAUSE_UNKNOWN = "Unknown" -REBOOT_CAUSE_NON_HARDWARE = "Non-Hardware" -REBOOT_CAUSE_HARDWARE_OTHER = "Hardware - Other" - -# Global logger class instance -sonic_logger = logger.Logger(SYSLOG_IDENTIFIER) - - -# ============================= Functions ============================= -def parse_warmfast_reboot_from_proc_cmdline(): - if os.path.isfile(REBOOT_TYPE_KEXEC_FILE): - with open(REBOOT_TYPE_KEXEC_FILE) as cause_file: - cause_file_kexec = cause_file.readline() - m = re.search(REBOOT_TYPE_KEXEC_PATTERN_WARM, cause_file_kexec) - if m and m.group(1): - return 'warm-reboot' - m = re.search(REBOOT_TYPE_KEXEC_PATTERN_FAST, cause_file_kexec) - if m and m.group(1): - return 'fast-reboot' - return None - - -def find_software_reboot_cause_from_reboot_cause_file(): - software_reboot_cause = REBOOT_CAUSE_UNKNOWN - if os.path.isfile(REBOOT_CAUSE_FILE): - with open(REBOOT_CAUSE_FILE) as cause_file: - software_reboot_cause = cause_file.readline().rstrip('\n') - sonic_logger.log_info("{} indicates the reboot cause: {}".format(REBOOT_CAUSE_FILE, software_reboot_cause)) - else: - sonic_logger.log_info("Reboot cause file {} not found".format(REBOOT_CAUSE_FILE)) - return software_reboot_cause - - -def find_first_boot_version(): - build_version = "unknown" - version_info = device_info.get_sonic_version_info() - if version_info: - build_version = version_info['build_version'] - return " (First boot of SONiC version {})".format(build_version) - - -def find_software_reboot_cause(): - software_reboot_cause = find_software_reboot_cause_from_reboot_cause_file() - if software_reboot_cause == REBOOT_CAUSE_UNKNOWN: - if os.path.isfile(FIRST_BOOT_PLATFORM_FILE): - software_reboot_cause += find_first_boot_version() - os.remove(FIRST_BOOT_PLATFORM_FILE) - return software_reboot_cause - - -def find_proc_cmdline_reboot_cause(): - proc_cmdline_reboot_cause = parse_warmfast_reboot_from_proc_cmdline() - - if proc_cmdline_reboot_cause: - sonic_logger.log_info("/proc/cmdline indicates reboot type: {}".format(proc_cmdline_reboot_cause)) - else: - sonic_logger.log_info("No reboot cause found from /proc/cmdline") - - return proc_cmdline_reboot_cause - - -def get_reboot_cause_from_platform(): - # Find hardware reboot cause using sonic_platform library - try: - import sonic_platform - platform = sonic_platform.platform.Platform() - chassis = platform.get_chassis() - hardware_reboot_cause_major, hardware_reboot_cause_minor = chassis.get_reboot_cause() - sonic_logger.log_info("Platform api returns reboot cause {}, {}".format(hardware_reboot_cause_major, hardware_reboot_cause_minor)) - except ImportError: - sonic_logger.log_warning("sonic_platform package not installed. 
Unable to detect hardware reboot causes.") - hardware_reboot_cause_major, hardware_reboot_cause_minor = REBOOT_CAUSE_NON_HARDWARE, "N/A" - - return hardware_reboot_cause_major, hardware_reboot_cause_minor - - -def find_hardware_reboot_cause(): - hardware_reboot_cause_major, hardware_reboot_cause_minor = get_reboot_cause_from_platform() - if hardware_reboot_cause_major: - sonic_logger.log_info("Platform api indicates reboot cause {}".format(hardware_reboot_cause_major)) - else: - sonic_logger.log_info("No reboot cause found from platform api") - - hardware_reboot_cause = "{} ({})".format(hardware_reboot_cause_major, hardware_reboot_cause_minor) - return hardware_reboot_cause - - -def get_reboot_cause_dict(previous_reboot_cause, comment, gen_time): - """Store the key infomation of device reboot into a dictionary by parsing the string in - previous_reboot_cause. - - If user issused a command to reboot device, then user, command and time will be - stored into a dictionary. - - If device was rebooted due to the kernel panic, then the string `Kernel Panic` - and time will be stored into a dictionary. - """ - reboot_cause_dict = {} - reboot_cause_dict['gen_time'] = gen_time - reboot_cause_dict['cause'] = previous_reboot_cause - reboot_cause_dict['user'] = "N/A" - reboot_cause_dict['time'] = "N/A" - reboot_cause_dict['comment'] = comment if comment is not None else "N/A" - - if re.search(r'User issued', previous_reboot_cause): - # Match with "User issued '{}' command [User: {}, Time: {}]" - match = re.search(r'User issued \'(.*)\' command \[User: (.*), Time: (.*)\]', previous_reboot_cause) - if match is not None: - reboot_cause_dict['cause'] = match.group(1) - reboot_cause_dict['user'] = match.group(2) - reboot_cause_dict['time'] = match.group(3) - elif re.search(r'Kernel Panic', previous_reboot_cause): - match = re.search(r'Kernel Panic \[Time: (.*)\]', previous_reboot_cause) - if match is not None: - reboot_cause_dict['cause'] = "Kernel Panic" - reboot_cause_dict['time'] = match.group(1) - - return reboot_cause_dict - - -def main(): - # Configure logger to log all messages INFO level and higher - sonic_logger.set_min_log_priority_info() - - sonic_logger.log_info("Starting up...") - - if not os.geteuid() == 0: - sonic_logger.log_error("User {} does not have permission to execute".format(pwd.getpwuid(os.getuid()).pw_name)) - sys.exit("This utility must be run as root") - - # Create REBOOT_CAUSE_DIR if it doesn't exist - if not os.path.exists(REBOOT_CAUSE_DIR): - os.makedirs(REBOOT_CAUSE_DIR) - - # Remove stale PREVIOUS_REBOOT_CAUSE_FILE if it exists - if os.path.exists(PREVIOUS_REBOOT_CAUSE_FILE): - os.remove(PREVIOUS_REBOOT_CAUSE_FILE) - - # This variable is kept for future-use purpose. 
When proc_cmd_line/vendor/software provides - # any additional_reboot_info it will be stored as a "comment" in REBOOT_CAUSE_HISTORY_FILE - additional_reboot_info = "N/A" - - # Check if the previous reboot was warm/fast reboot by testing whether there is "fast|fastfast|warm" in /proc/cmdline - proc_cmdline_reboot_cause = find_proc_cmdline_reboot_cause() - - # If /proc/cmdline does not indicate reboot cause, check if the previous reboot was caused by hardware - if proc_cmdline_reboot_cause is None: - previous_reboot_cause = find_hardware_reboot_cause() - if previous_reboot_cause.startswith(REBOOT_CAUSE_NON_HARDWARE): - # If the reboot cause is non-hardware, get the reboot cause from REBOOT_CAUSE_FILE - previous_reboot_cause = find_software_reboot_cause() - else: - # Get the reboot cause from REBOOT_CAUSE_FILE - previous_reboot_cause = find_software_reboot_cause() - - # Current time - reboot_cause_gen_time = str(datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) - - # Save the previous cause info into its history file as json format - reboot_cause_dict = get_reboot_cause_dict(previous_reboot_cause, additional_reboot_info, reboot_cause_gen_time) - - # Create reboot-cause-#time#.json under history directory - REBOOT_CAUSE_HISTORY_FILE = os.path.join(REBOOT_CAUSE_HISTORY_DIR, "reboot-cause-{}.json".format(reboot_cause_gen_time)) - - # Create REBOOT_CAUSE_HISTORY_DIR if it doesn't exist - if not os.path.exists(REBOOT_CAUSE_HISTORY_DIR): - os.makedirs(REBOOT_CAUSE_HISTORY_DIR) - - # Write the previous reboot cause to REBOOT_CAUSE_HISTORY_FILE as a JSON format - with open(REBOOT_CAUSE_HISTORY_FILE, "w") as reboot_cause_history_file: - json.dump(reboot_cause_dict, reboot_cause_history_file) - - # Create a symbolic link to previous-reboot-cause.json file - os.symlink(REBOOT_CAUSE_HISTORY_FILE, PREVIOUS_REBOOT_CAUSE_FILE) - - - # Remove the old REBOOT_CAUSE_FILE - if os.path.exists(REBOOT_CAUSE_FILE): - os.remove(REBOOT_CAUSE_FILE) - - # Write a new default reboot cause file for the next reboot - with open(REBOOT_CAUSE_FILE, "w") as cause_file: - cause_file.write(REBOOT_CAUSE_UNKNOWN) - - -if __name__ == "__main__": - main() diff --git a/src/sonic-host-services/scripts/hostcfgd b/src/sonic-host-services/scripts/hostcfgd deleted file mode 100755 index a82a630bfc..0000000000 --- a/src/sonic-host-services/scripts/hostcfgd +++ /dev/null @@ -1,1471 +0,0 @@ -#!/usr/bin/env python3 - -import ast -import copy -import ipaddress -import os -import sys -import subprocess -import syslog -import signal -import re -import jinja2 -from sonic_py_common import device_info -from swsscommon.swsscommon import ConfigDBConnector, DBConnector, Table - -# FILE -PAM_AUTH_CONF = "/etc/pam.d/common-auth-sonic" -PAM_AUTH_CONF_TEMPLATE = "/usr/share/sonic/templates/common-auth-sonic.j2" -PAM_PASSWORD_CONF = "/etc/pam.d/common-password" -PAM_PASSWORD_CONF_TEMPLATE = "/usr/share/sonic/templates/common-password.j2" -NSS_TACPLUS_CONF = "/etc/tacplus_nss.conf" -NSS_TACPLUS_CONF_TEMPLATE = "/usr/share/sonic/templates/tacplus_nss.conf.j2" -NSS_RADIUS_CONF = "/etc/radius_nss.conf" -NSS_RADIUS_CONF_TEMPLATE = "/usr/share/sonic/templates/radius_nss.conf.j2" -PAM_RADIUS_AUTH_CONF_TEMPLATE = "/usr/share/sonic/templates/pam_radius_auth.conf.j2" -NSS_CONF = "/etc/nsswitch.conf" -ETC_PAMD_SSHD = "/etc/pam.d/sshd" -ETC_PAMD_LOGIN = "/etc/pam.d/login" -ETC_LOGIN_DEF = "/etc/login.defs" - -# Linux login.def default values (password hardening disable) -LINUX_DEFAULT_PASS_MAX_DAYS = 99999 -LINUX_DEFAULT_PASS_WARN_AGE = 7 - -ACCOUNT_NAME = 0 # 
index of account name -AGE_DICT = { 'MAX_DAYS': {'REGEX_DAYS': r'^PASS_MAX_DAYS[ \t]*(?P<max_days>\d*)', 'DAYS': 'max_days', 'CHAGE_FLAG': '-M '}, - 'WARN_DAYS': {'REGEX_DAYS': r'^PASS_WARN_AGE[ \t]*(?P<warn_days>\d*)', 'DAYS': 'warn_days', 'CHAGE_FLAG': '-W '} - } -PAM_LIMITS_CONF_TEMPLATE = "/usr/share/sonic/templates/pam_limits.j2" -LIMITS_CONF_TEMPLATE = "/usr/share/sonic/templates/limits.conf.j2" -PAM_LIMITS_CONF = "/etc/pam.d/pam-limits-conf" -LIMITS_CONF = "/etc/security/limits.conf" - -# TACACS+ -TACPLUS_SERVER_PASSKEY_DEFAULT = "" -TACPLUS_SERVER_TIMEOUT_DEFAULT = "5" -TACPLUS_SERVER_AUTH_TYPE_DEFAULT = "pap" - -# RADIUS -RADIUS_SERVER_AUTH_PORT_DEFAULT = "1812" -RADIUS_SERVER_PASSKEY_DEFAULT = "" -RADIUS_SERVER_RETRANSMIT_DEFAULT = "3" -RADIUS_SERVER_TIMEOUT_DEFAULT = "5" -RADIUS_SERVER_AUTH_TYPE_DEFAULT = "pap" -RADIUS_PAM_AUTH_CONF_DIR = "/etc/pam_radius_auth.d/" - -# MISC Constants -CFG_DB = "CONFIG_DB" -STATE_DB = "STATE_DB" -HOSTCFGD_MAX_PRI = 10 # Used to enforce ordering b/w daemons under Hostcfgd -DEFAULT_SELECT_TIMEOUT = 1000 - - -def safe_eval(val, default_value=False): - """ Safely evaluate the expression, without raising an exception """ - try: - ret = ast.literal_eval(val) - except ValueError: - ret = default_value - return ret - - -def signal_handler(sig, frame): - if sig == signal.SIGHUP: - syslog.syslog(syslog.LOG_INFO, "HostCfgd: signal 'SIGHUP' is caught and ignoring..") - elif sig == signal.SIGINT: - syslog.syslog(syslog.LOG_INFO, "HostCfgd: signal 'SIGINT' is caught and exiting...") - sys.exit(128 + sig) - elif sig == signal.SIGTERM: - syslog.syslog(syslog.LOG_INFO, "HostCfgd: signal 'SIGTERM' is caught and exiting...") - sys.exit(128 + sig) - else: - syslog.syslog(syslog.LOG_INFO, "HostCfgd: invalid signal - ignoring..") - - -def run_cmd(cmd, log_err=True, raise_exception=False): - try: - subprocess.check_call(cmd, shell=True) - except Exception as err: - if log_err: - syslog.syslog(syslog.LOG_ERR, "{} - failed: return code - {}, output:\n{}" - .format(err.cmd, err.returncode, err.output)) - if raise_exception: - raise - - -def is_true(val): - if val == 'True' or val == 'true': - return True - elif val == 'False' or val == 'false': - return False - syslog.syslog(syslog.LOG_ERR, "Failed to get bool value, instead val= {}".format(val)) - return False - - -def is_vlan_sub_interface(ifname): - ifname_split = ifname.split(".") - return (len(ifname_split) == 2) - - -def sub(l, start, end): - return l[start:end] - - -def obfuscate(data): - if data: - return data[0] + '*****' - else: - return data - - -def get_pid(procname): - for dirname in os.listdir('/proc'): - if dirname == 'curproc': - continue - try: - with open('/proc/{}/cmdline'.format(dirname), mode='r') as fd: - content = fd.read() - except Exception as ex: - continue - if procname in content: - return dirname - return "" - - -class Feature(object): - """ Represents a feature configuration from CONFIG_DB data. """ - - def __init__(self, feature_name, feature_cfg, device_config=None): - """ Initialize Feature object based on CONFIG_DB data. 
- - Args: - feature_name (str): Feature name string - feature_cfg (dict): Feature CONFIG_DB configuration - deviec_config (dict): DEVICE_METADATA section of CONFIG_DB - """ - - self.name = feature_name - self.state = self._get_target_state(feature_cfg.get('state'), device_config or {}) - self.auto_restart = feature_cfg.get('auto_restart', 'disabled') - self.has_timer = safe_eval(feature_cfg.get('has_timer', 'False')) - self.has_global_scope = safe_eval(feature_cfg.get('has_global_scope', 'True')) - self.has_per_asic_scope = safe_eval(feature_cfg.get('has_per_asic_scope', 'False')) - - def _get_target_state(self, state_configuration, device_config): - """ Returns the target state for the feature by rendering the state field as J2 template. - - Args: - state_configuration (str): State configuration from CONFIG_DB - deviec_config (dict): DEVICE_METADATA section of CONFIG_DB - Returns: - (str): Target feature state - """ - - if state_configuration is None: - return None - - template = jinja2.Template(state_configuration) - target_state = template.render(device_config) - if target_state not in ('enabled', 'disabled', 'always_enabled', 'always_disabled'): - raise ValueError('Invalid state rendered for feature {}: {}'.format(self.name, target_state)) - return target_state - - def compare_state(self, feature_name, feature_cfg): - if self.name != feature_name or not isinstance(feature_cfg, dict): - return False - - if self.state != feature_cfg.get('state', ''): - return False - return True - - -class FeatureHandler(object): - """ Handles FEATURE table updates. """ - - SYSTEMD_SYSTEM_DIR = '/etc/systemd/system/' - SYSTEMD_SERVICE_CONF_DIR = os.path.join(SYSTEMD_SYSTEM_DIR, '{}.service.d/') - - # Feature state constants - FEATURE_STATE_ENABLED = "enabled" - FEATURE_STATE_DISABLED = "disabled" - FEATURE_STATE_FAILED = "failed" - - def __init__(self, config_db, feature_state_table, device_config): - self._config_db = config_db - self._feature_state_table = feature_state_table - self._device_config = device_config - self._cached_config = {} - self.is_multi_npu = device_info.is_multi_npu() - - def handler(self, feature_name, op, feature_cfg): - if not feature_cfg: - syslog.syslog(syslog.LOG_INFO, "Deregistering feature {}".format(feature_name)) - self._cached_config.pop(feature_name, None) - self._feature_state_table._del(feature_name) - return - - feature = Feature(feature_name, feature_cfg, self._device_config) - self._cached_config.setdefault(feature_name, Feature(feature_name, {})) - - # Change auto-restart configuration first. - # If service reached failed state before this configuration applies (e.g. on boot) - # the next called self.update_feature_state will start it again. If it will fail - # again the auto restart will kick-in. Another order may leave it in failed state - # and not auto restart. - if self._cached_config[feature_name].auto_restart != feature.auto_restart: - syslog.syslog(syslog.LOG_INFO, "Auto-restart status of feature '{}' is changed from '{}' to '{}' ..." - .format(feature_name, self._cached_config[feature_name].auto_restart, feature.auto_restart)) - self.update_systemd_config(feature) - self._cached_config[feature_name].auto_restart = feature.auto_restart - - # Enable/disable the container service if the feature state was changed from its previous state. 
- if self._cached_config[feature_name].state != feature.state: - if self.update_feature_state(feature): - self._cached_config[feature_name].state = feature.state - else: - self.resync_feature_state(self._cached_config[feature_name]) - - def sync_state_field(self, feature_table): - """ - Summary: - Updates the state field in the FEATURE|* tables as the state field - might have to be rendered based on DEVICE_METADATA table - """ - for feature_name in feature_table.keys(): - if not feature_name: - syslog.syslog(syslog.LOG_WARNING, "Feature is None") - continue - - feature = Feature(feature_name, feature_table[feature_name], self._device_config) - - self._cached_config.setdefault(feature_name, feature) - self.update_systemd_config(feature) - self.update_feature_state(feature) - self.resync_feature_state(feature) - - def update_feature_state(self, feature): - cached_feature = self._cached_config[feature.name] - enable = False - disable = False - - # Allowed transitions: - # None -> always_enabled - # -> always_disabled - # -> enabled - # -> disabled - # always_enabled -> always_disabled - # enabled -> disabled - # disabled -> enabled - if cached_feature.state is None: - enable = feature.state in ("always_enabled", "enabled") - disable = feature.state in ("always_disabled", "disabled") - elif cached_feature.state in ("always_enabled", "always_disabled"): - disable = feature.state == "always_disabled" - enable = feature.state == "always_enabled" - elif cached_feature.state in ("enabled", "disabled"): - enable = feature.state == "enabled" - disable = feature.state == "disabled" - else: - syslog.syslog(syslog.LOG_INFO, "Feature {} service is {}".format(feature.name, cached_feature.state)) - return False - - if not enable and not disable: - syslog.syslog(syslog.LOG_ERR, "Unexpected state value '{}' for feature {}" - .format(feature.state, feature.name)) - return False - - if enable: - self.enable_feature(feature) - syslog.syslog(syslog.LOG_INFO, "Feature {} is enabled and started".format(feature.name)) - - if disable: - self.disable_feature(feature) - syslog.syslog(syslog.LOG_INFO, "Feature {} is stopped and disabled".format(feature.name)) - - return True - - def update_systemd_config(self, feature_config): - """Updates `Restart=` field in feature's systemd configuration file - according to the value of `auto_restart` field in `FEATURE` table of `CONFIG_DB`. - - Args: - feature: An object represents a feature's configuration in `FEATURE` - table of `CONFIG_DB`. - - Returns: - None. - """ - restart_field_str = "always" if "enabled" in feature_config.auto_restart else "no" - feature_systemd_config = "[Service]\nRestart={}\n".format(restart_field_str) - feature_names, feature_suffixes = self.get_multiasic_feature_instances(feature_config) - - # On multi-ASIC device, creates systemd configuration file for each feature instance - # residing in difference namespace. - for feature_name in feature_names: - syslog.syslog(syslog.LOG_INFO, "Updating feature '{}' systemd config file related to auto-restart ..." 
- .format(feature_name)) - feature_systemd_config_dir_path = self.SYSTEMD_SERVICE_CONF_DIR.format(feature_name) - feature_systemd_config_file_path = os.path.join(feature_systemd_config_dir_path, 'auto_restart.conf') - - if not os.path.exists(feature_systemd_config_dir_path): - os.mkdir(feature_systemd_config_dir_path) - with open(feature_systemd_config_file_path, 'w') as feature_systemd_config_file_handler: - feature_systemd_config_file_handler.write(feature_systemd_config) - - syslog.syslog(syslog.LOG_INFO, "Feautre '{}' systemd config file related to auto-restart is updated!" - .format(feature_name)) - - try: - syslog.syslog(syslog.LOG_INFO, "Reloading systemd configuration files ...") - run_cmd("sudo systemctl daemon-reload", raise_exception=True) - syslog.syslog(syslog.LOG_INFO, "Systemd configuration files are reloaded!") - except Exception as err: - syslog.syslog(syslog.LOG_ERR, "Failed to reload systemd configuration files!") - - def get_multiasic_feature_instances(self, feature): - # Create feature name suffix depending feature is running in host or namespace or in both - feature_names = ( - ([feature.name] if feature.has_global_scope or not self.is_multi_npu else []) + - ([(feature.name + '@' + str(asic_inst)) for asic_inst in range(device_info.get_num_npus()) - if feature.has_per_asic_scope and self.is_multi_npu]) - ) - - if not feature_names: - syslog.syslog(syslog.LOG_ERR, "Feature '{}' service not available" - .format(feature.name)) - - feature_suffixes = ["service"] + (["timer"] if feature.has_timer else []) - - return feature_names, feature_suffixes - - def get_systemd_unit_state(self, unit): - """ Returns service configuration """ - - cmd = "sudo systemctl show {} --property UnitFileState".format(unit) - proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = proc.communicate() - if proc.returncode != 0: - syslog.syslog(syslog.LOG_ERR, "Failed to get status of {}: rc={} stderr={}".format(unit, proc.returncode, stderr)) - return 'invalid' # same as systemd's "invalid indicates that it could not be determined whether the unit file is enabled". 
- - props = dict([line.split("=") for line in stdout.decode().strip().splitlines()]) - return props["UnitFileState"] - - def enable_feature(self, feature): - cmds = [] - feature_names, feature_suffixes = self.get_multiasic_feature_instances(feature) - for feature_name in feature_names: - # Check if it is already enabled, if yes skip the system call - unit_file_state = self.get_systemd_unit_state("{}.{}".format(feature_name, feature_suffixes[-1])) - if unit_file_state == "enabled": - continue - - for suffix in feature_suffixes: - cmds.append("sudo systemctl unmask {}.{}".format(feature_name, suffix)) - - # If feature has timer associated with it, start/enable corresponding systemd .timer unit - # otherwise, start/enable corresponding systemd .service unit - - cmds.append("sudo systemctl enable {}.{}".format(feature_name, feature_suffixes[-1])) - cmds.append("sudo systemctl start {}.{}".format(feature_name, feature_suffixes[-1])) - - for cmd in cmds: - syslog.syslog(syslog.LOG_INFO, "Running cmd: '{}'".format(cmd)) - try: - run_cmd(cmd, raise_exception=True) - except Exception as err: - syslog.syslog(syslog.LOG_ERR, "Feature '{}.{}' failed to be enabled and started" - .format(feature.name, feature_suffixes[-1])) - self.set_feature_state(feature, self.FEATURE_STATE_FAILED) - return - - self.set_feature_state(feature, self.FEATURE_STATE_ENABLED) - - def disable_feature(self, feature): - cmds = [] - feature_names, feature_suffixes = self.get_multiasic_feature_instances(feature) - for feature_name in feature_names: - # Check if it is already disabled, if yes skip the system call - unit_file_state = self.get_systemd_unit_state("{}.{}".format(feature_name, feature_suffixes[-1])) - if unit_file_state in ("disabled", "masked"): - continue - - for suffix in reversed(feature_suffixes): - cmds.append("sudo systemctl stop {}.{}".format(feature_name, suffix)) - cmds.append("sudo systemctl disable {}.{}".format(feature_name, feature_suffixes[-1])) - cmds.append("sudo systemctl mask {}.{}".format(feature_name, feature_suffixes[-1])) - for cmd in cmds: - syslog.syslog(syslog.LOG_INFO, "Running cmd: '{}'".format(cmd)) - try: - run_cmd(cmd, raise_exception=True) - except Exception as err: - syslog.syslog(syslog.LOG_ERR, "Feature '{}.{}' failed to be stopped and disabled" - .format(feature.name, feature_suffixes[-1])) - self.set_feature_state(feature, self.FEATURE_STATE_FAILED) - return - - self.set_feature_state(feature, self.FEATURE_STATE_DISABLED) - - def resync_feature_state(self, feature): - self._config_db.mod_entry('FEATURE', feature.name, {'state': feature.state}) - - def set_feature_state(self, feature, state): - self._feature_state_table.set(feature.name, [('state', state)]) - - -class Iptables(object): - def __init__(self): - ''' - Default MSS to 1460 - (MTU 1500 - 40 (TCP/IP Overhead)) - For IPv6, it would be 1440 - (MTU 1500 - 60 octects) - ''' - self.tcpmss = 1460 - self.tcp6mss = 1440 - - def is_ip_prefix_in_key(self, key): - ''' - Function to check if IP address is present in the key. 
If it - is present, then the key would be a tuple or else, it shall be - be string - ''' - return (isinstance(key, tuple)) - - def load(self, lpbk_table): - for row in lpbk_table: - self.iptables_handler(row, lpbk_table[row]) - - def command(self, chain, ip, ver, op): - cmd = 'iptables' if ver == '4' else 'ip6tables' - cmd += ' -t mangle --{} {} -p tcp --tcp-flags SYN SYN'.format(op, chain) - cmd += ' -d' if chain == 'PREROUTING' else ' -s' - mss = self.tcpmss if ver == '4' else self.tcp6mss - cmd += ' {} -j TCPMSS --set-mss {}'.format(ip, mss) - - return cmd - - def iptables_handler(self, key, data, add=True): - if not self.is_ip_prefix_in_key(key): - return - - iface, ip = key - ip_str = ip.split("/")[0] - ip_addr = ipaddress.ip_address(ip_str) - if isinstance(ip_addr, ipaddress.IPv6Address): - ver = '6' - else: - ver = '4' - - self.mangle_handler(ip_str, ver, add) - - def mangle_handler(self, ip, ver, add): - if not add: - op = 'delete' - else: - op = 'check' - - iptables_cmds = [] - chains = ['PREROUTING', 'POSTROUTING'] - for chain in chains: - cmd = self.command(chain, ip, ver, op) - if not add: - iptables_cmds.append(cmd) - else: - ''' - For add case, first check if rule exists. Iptables just appends to the chain - as a new rule even if it is the same as an existing one. Check this and - do nothing if rule exists - ''' - ret = subprocess.call(cmd, shell=True) - if ret == 0: - syslog.syslog(syslog.LOG_INFO, "{} rule exists in {}".format(ip, chain)) - else: - # Modify command from Check to Append - iptables_cmds.append(cmd.replace("check", "append")) - - for cmd in iptables_cmds: - syslog.syslog(syslog.LOG_INFO, "Running cmd - {}".format(cmd)) - run_cmd(cmd) - - -class AaaCfg(object): - def __init__(self): - self.authentication_default = { - 'login': 'local', - } - self.authorization_default = { - 'login': 'local', - } - self.accounting_default = { - 'login': 'disable', - } - self.tacplus_global_default = { - 'auth_type': TACPLUS_SERVER_AUTH_TYPE_DEFAULT, - 'timeout': TACPLUS_SERVER_TIMEOUT_DEFAULT, - 'passkey': TACPLUS_SERVER_PASSKEY_DEFAULT - } - self.tacplus_global = {} - self.tacplus_servers = {} - - self.radius_global_default = { - 'priority': 0, - 'auth_port': RADIUS_SERVER_AUTH_PORT_DEFAULT, - 'auth_type': RADIUS_SERVER_AUTH_TYPE_DEFAULT, - 'retransmit': RADIUS_SERVER_RETRANSMIT_DEFAULT, - 'timeout': RADIUS_SERVER_TIMEOUT_DEFAULT, - 'passkey': RADIUS_SERVER_PASSKEY_DEFAULT - } - self.radius_global = {} - self.radius_servers = {} - - self.authentication = {} - self.authorization = {} - self.accounting = {} - self.debug = False - self.trace = False - - self.hostname = "" - - # Load conf from ConfigDb - def load(self, aaa_conf, tac_global_conf, tacplus_conf, rad_global_conf, radius_conf): - for row in aaa_conf: - self.aaa_update(row, aaa_conf[row], modify_conf=False) - for row in tac_global_conf: - self.tacacs_global_update(row, tac_global_conf[row], modify_conf=False) - for row in tacplus_conf: - self.tacacs_server_update(row, tacplus_conf[row], modify_conf=False) - - for row in rad_global_conf: - self.radius_global_update(row, rad_global_conf[row], modify_conf=False) - for row in radius_conf: - self.radius_server_update(row, radius_conf[row], modify_conf=False) - - self.modify_conf_file() - - def aaa_update(self, key, data, modify_conf=True): - if key == 'authentication': - self.authentication = data - if 'failthrough' in data: - self.authentication['failthrough'] = is_true(data['failthrough']) - if 'debug' in data: - self.debug = is_true(data['debug']) - if key == 
'authorization': - self.authorization = data - if key == 'accounting': - self.accounting = data - if modify_conf: - self.modify_conf_file() - - def pick_src_intf_ipaddrs(self, keys, src_intf): - new_ipv4_addr = "" - new_ipv6_addr = "" - - for it in keys: - if src_intf != it[0] or (isinstance(it, tuple) == False): - continue - if new_ipv4_addr != "" and new_ipv6_addr != "": - break - ip_str = it[1].split("/")[0] - ip_addr = ipaddress.IPAddress(ip_str) - # Pick the first IP address from the table that matches the source interface - if isinstance(ip_addr, ipaddress.IPv6Address): - if new_ipv6_addr != "": - continue - new_ipv6_addr = ip_str - else: - if new_ipv4_addr != "": - continue - new_ipv4_addr = ip_str - - return(new_ipv4_addr, new_ipv6_addr) - - def tacacs_global_update(self, key, data, modify_conf=True): - if key == 'global': - self.tacplus_global = data - if modify_conf: - self.modify_conf_file() - - def tacacs_server_update(self, key, data, modify_conf=True): - if data == {}: - if key in self.tacplus_servers: - del self.tacplus_servers[key] - else: - self.tacplus_servers[key] = data - - if modify_conf: - self.modify_conf_file() - - def notify_audisp_tacplus_reload_config(self): - pid = get_pid("/sbin/audisp-tacplus") - syslog.syslog(syslog.LOG_INFO, "Found audisp-tacplus PID: {}".format(pid)) - if pid == "": - return - - # audisp-tacplus will reload TACACS+ config when receive SIGHUP - try: - os.kill(int(pid), signal.SIGHUP) - except Exception as ex: - syslog.syslog(syslog.LOG_WARNING, "Send SIGHUP to audisp-tacplus failed with exception: {}".format(ex)) - - def handle_radius_source_intf_ip_chg(self, key): - modify_conf=False - if 'src_intf' in self.radius_global: - if key[0] == self.radius_global['src_intf']: - modify_conf=True - for addr in self.radius_servers: - if ('src_intf' in self.radius_servers[addr]) and \ - (key[0] == self.radius_servers[addr]['src_intf']): - modify_conf=True - break - - if not modify_conf: - return - - syslog.syslog(syslog.LOG_INFO, 'RADIUS IP change - key:{}, current server info {}'.format(key, self.radius_servers)) - self.modify_conf_file() - - def handle_radius_nas_ip_chg(self, key): - modify_conf=False - # Mgmt IP configuration affects only the default nas_ip - if 'nas_ip' not in self.radius_global: - for addr in self.radius_servers: - if 'nas_ip' not in self.radius_servers[addr]: - modify_conf=True - break - - if not modify_conf: - return - - syslog.syslog(syslog.LOG_INFO, 'RADIUS (NAS) IP change - key:{}, current global info {}'.format(key, self.radius_global)) - self.modify_conf_file() - - def radius_global_update(self, key, data, modify_conf=True): - if key == 'global': - self.radius_global = data - if 'statistics' in data: - self.radius_global['statistics'] = is_true(data['statistics']) - if modify_conf: - self.modify_conf_file() - - def radius_server_update(self, key, data, modify_conf=True): - if data == {}: - if key in self.radius_servers: - del self.radius_servers[key] - else: - self.radius_servers[key] = data - - if modify_conf: - self.modify_conf_file() - - def hostname_update(self, hostname, modify_conf=True): - if self.hostname == hostname: - return - - self.hostname = hostname - - # Currently only used for RADIUS - if len(self.radius_servers) == 0: - return - - if modify_conf: - self.modify_conf_file() - - def get_hostname(self): - return self.hostname - - def get_interface_ip(self, source, addr=None): - keys = None - try: - if source.startswith("Eth"): - if is_vlan_sub_interface(source): - keys = 
self.config_db.get_keys('VLAN_SUB_INTERFACE') - else: - keys = self.config_db.get_keys('INTERFACE') - elif source.startswith("Po"): - if is_vlan_sub_interface(source): - keys = self.config_db.get_keys('VLAN_SUB_INTERFACE') - else: - keys = self.config_db.get_keys('PORTCHANNEL_INTERFACE') - elif source.startswith("Vlan"): - keys = self.config_db.get_keys('VLAN_INTERFACE') - elif source.startswith("Loopback"): - keys = self.config_db.get_keys('LOOPBACK_INTERFACE') - elif source == "eth0": - keys = self.config_db.get_keys('MGMT_INTERFACE') - except Exception as e: - pass - - interface_ip = "" - if keys != None: - ipv4_addr, ipv6_addr = self.pick_src_intf_ipaddrs(keys, source) - # Based on the type of addr, return v4 or v6 - if addr and isinstance(addr, ipaddress.IPv6Address): - interface_ip = ipv6_addr - else: - # This could be tuned, but that involves a DNS query, so - # offline configuration might trip (or cause delays). - interface_ip = ipv4_addr - return interface_ip - - def modify_single_file(self, filename, operations=None): - if operations: - cmd = "sed -e {0} {1} > {1}.new; mv -f {1} {1}.old; mv -f {1}.new {1}".format(' -e '.join(operations), filename) - os.system(cmd) - - def modify_conf_file(self): - authentication = self.authentication_default.copy() - authentication.update(self.authentication) - authorization = self.authorization_default.copy() - authorization.update(self.authorization) - accounting = self.accounting_default.copy() - accounting.update(self.accounting) - tacplus_global = self.tacplus_global_default.copy() - tacplus_global.update(self.tacplus_global) - if 'src_ip' in tacplus_global: - src_ip = tacplus_global['src_ip'] - else: - src_ip = None - - servers_conf = [] - if self.tacplus_servers: - for addr in self.tacplus_servers: - server = tacplus_global.copy() - server['ip'] = addr - server.update(self.tacplus_servers[addr]) - servers_conf.append(server) - servers_conf = sorted(servers_conf, key=lambda t: int(t['priority']), reverse=True) - - radius_global = self.radius_global_default.copy() - radius_global.update(self.radius_global) - - # RADIUS: Set the default nas_ip, and nas_id - if 'nas_ip' not in radius_global: - nas_ip = self.get_interface_ip("eth0") - if len(nas_ip) > 0: - radius_global['nas_ip'] = nas_ip - if 'nas_id' not in radius_global: - nas_id = self.get_hostname() - if len(nas_id) > 0: - radius_global['nas_id'] = nas_id - - radsrvs_conf = [] - if self.radius_servers: - for addr in self.radius_servers: - server = radius_global.copy() - server['ip'] = addr - server.update(self.radius_servers[addr]) - - if 'src_intf' in server: - # RADIUS: Log a message if src_ip is already defined. - if 'src_ip' in server: - syslog.syslog(syslog.LOG_INFO, \ - "RADIUS_SERVER|{}: src_intf found. Ignoring src_ip".format(addr)) - # RADIUS: If server.src_intf, then get the corresponding - # src_ip based on the server.ip, and set it. 
- src_ip = self.get_interface_ip(server['src_intf'], addr) - if len(src_ip) > 0: - server['src_ip'] = src_ip - elif 'src_ip' in server: - syslog.syslog(syslog.LOG_INFO, \ - "RADIUS_SERVER|{}: src_intf has no usable IP addr.".format(addr)) - del server['src_ip'] - - radsrvs_conf.append(server) - radsrvs_conf = sorted(radsrvs_conf, key=lambda t: int(t['priority']), reverse=True) - - template_file = os.path.abspath(PAM_AUTH_CONF_TEMPLATE) - env = jinja2.Environment(loader=jinja2.FileSystemLoader('/'), trim_blocks=True) - env.filters['sub'] = sub - template = env.get_template(template_file) - if 'radius' in authentication['login']: - pam_conf = template.render(debug=self.debug, trace=self.trace, auth=authentication, servers=radsrvs_conf) - else: - pam_conf = template.render(auth=authentication, src_ip=src_ip, servers=servers_conf) - - # Use rename(), which is atomic (on the same fs) to avoid empty file - with open(PAM_AUTH_CONF + ".tmp", 'w') as f: - f.write(pam_conf) - os.chmod(PAM_AUTH_CONF + ".tmp", 0o644) - os.rename(PAM_AUTH_CONF + ".tmp", PAM_AUTH_CONF) - - # Modify common-auth include file in /etc/pam.d/login, sshd. - # /etc/pam.d/sudo is not handled, because it would change the existing - # behavior. It can be modified once a config knob is added for sudo. - if os.path.isfile(PAM_AUTH_CONF): - self.modify_single_file(ETC_PAMD_SSHD, [ "'/^@include/s/common-auth$/common-auth-sonic/'" ]) - self.modify_single_file(ETC_PAMD_LOGIN, [ "'/^@include/s/common-auth$/common-auth-sonic/'" ]) - else: - self.modify_single_file(ETC_PAMD_SSHD, [ "'/^@include/s/common-auth-sonic$/common-auth/'" ]) - self.modify_single_file(ETC_PAMD_LOGIN, [ "'/^@include/s/common-auth-sonic$/common-auth/'" ]) - - # Add tacplus/radius in nsswitch.conf if TACACS+/RADIUS enable - if 'tacacs+' in authentication['login']: - if os.path.isfile(NSS_CONF): - self.modify_single_file(NSS_CONF, [ "'/^passwd/s/ radius//'" ]) - self.modify_single_file(NSS_CONF, [ "'/tacplus/b'", "'/^passwd/s/compat/tacplus &/'", "'/^passwd/s/files/tacplus &/'" ]) - elif 'radius' in authentication['login']: - if os.path.isfile(NSS_CONF): - self.modify_single_file(NSS_CONF, [ "'/^passwd/s/tacplus //'" ]) - self.modify_single_file(NSS_CONF, [ "'/radius/b'", "'/^passwd/s/compat/& radius/'", "'/^passwd/s/files/& radius/'" ]) - else: - if os.path.isfile(NSS_CONF): - self.modify_single_file(NSS_CONF, [ "'/^passwd/s/tacplus //g'" ]) - self.modify_single_file(NSS_CONF, [ "'/^passwd/s/ radius//'" ]) - - # Add tacplus authorization configration in nsswitch.conf - tacacs_authorization_conf = None - local_authorization_conf = None - if 'tacacs+' in authorization['login']: - tacacs_authorization_conf = "on" - if 'local' in authorization['login']: - local_authorization_conf = "on" - - # Add tacplus accounting configration in nsswitch.conf - tacacs_accounting_conf = None - local_accounting_conf = None - if 'tacacs+' in accounting['login']: - tacacs_accounting_conf = "on" - if 'local' in accounting['login']: - local_accounting_conf = "on" - - # Set tacacs+ server in nss-tacplus conf - template_file = os.path.abspath(NSS_TACPLUS_CONF_TEMPLATE) - template = env.get_template(template_file) - nss_tacplus_conf = template.render( - debug=self.debug, - src_ip=src_ip, - servers=servers_conf, - local_accounting=local_accounting_conf, - tacacs_accounting=tacacs_accounting_conf, - local_authorization=local_authorization_conf, - tacacs_authorization=tacacs_authorization_conf) - with open(NSS_TACPLUS_CONF, 'w') as f: - f.write(nss_tacplus_conf) - - # Notify auditd plugin to reload 
tacacs config. - self.notify_audisp_tacplus_reload_config() - - # Set debug in nss-radius conf - template_file = os.path.abspath(NSS_RADIUS_CONF_TEMPLATE) - template = env.get_template(template_file) - nss_radius_conf = template.render(debug=self.debug, trace=self.trace, servers=radsrvs_conf) - with open(NSS_RADIUS_CONF, 'w') as f: - f.write(nss_radius_conf) - - # Create the per server pam_radius_auth.conf - if radsrvs_conf: - for srv in radsrvs_conf: - # Configuration File - pam_radius_auth_file = RADIUS_PAM_AUTH_CONF_DIR + srv['ip'] + "_" + srv['auth_port'] + ".conf" - template_file = os.path.abspath(PAM_RADIUS_AUTH_CONF_TEMPLATE) - template = env.get_template(template_file) - pam_radius_auth_conf = template.render(server=srv) - - open(pam_radius_auth_file, 'a').close() - os.chmod(pam_radius_auth_file, 0o600) - with open(pam_radius_auth_file, 'w+') as f: - f.write(pam_radius_auth_conf) - - # Start the statistics service. Only RADIUS implemented - if ('radius' in authentication['login']) and ('statistics' in radius_global) and \ - radius_global['statistics']: - cmd = 'service aaastatsd start' - else: - cmd = 'service aaastatsd stop' - syslog.syslog(syslog.LOG_INFO, "cmd - {}".format(cmd)) - try: - subprocess.check_call(cmd, shell=True) - except subprocess.CalledProcessError as err: - syslog.syslog(syslog.LOG_ERR, - "{} - failed: return code - {}, output:\n{}" - .format(err.cmd, err.returncode, err.output)) - - -class PasswHardening(object): - def __init__(self): - self.passw_policies_default = {} - self.passw_policies = {} - - self.debug = False - self.trace = False - - def load(self, policies_conf): - for row in policies_conf: - self.passw_policies_update(row, policies_conf[row], modify_conf=False) - - self.modify_passw_conf_file() - - def passw_policies_update(self, key, data, modify_conf=True): - syslog.syslog(syslog.LOG_DEBUG, "passw_policies_update - key: {}".format(key)) - syslog.syslog(syslog.LOG_DEBUG, "passw_policies_update - data: {}".format(data)) - - if data == {}: - self.passw_policies = {} - else: - if 'reject_user_passw_match' in data: - data['reject_user_passw_match'] = is_true(data['reject_user_passw_match']) - if 'lower_class' in data: - data['lower_class'] = is_true(data['lower_class']) - if 'upper_class' in data: - data['upper_class'] = is_true(data['upper_class']) - if 'digits_class' in data: - data['digits_class'] = is_true(data['digits_class']) - if 'special_class' in data: - data['special_class'] = is_true(data['special_class']) - - if key == 'POLICIES': - self.passw_policies = data - - if modify_conf: - self.modify_passw_conf_file() - - def modify_single_file_inplace(self, filename, operations=None): - if operations: - cmd = "sed -i {0} {1}".format(' -i '.join(operations), filename) - syslog.syslog(syslog.LOG_DEBUG, "modify_single_file_inplace: cmd - {}".format(cmd)) - os.system(cmd) - - def set_passw_hardening_policies(self, passw_policies): - # Password Hardening flow - # When feature is enabled, the passw_policies from CONFIG_DB will be set in the pam files /etc/pam.d/common-password and /etc/login.def. - # When the feature is disabled, the files above will be generate with the linux default (without secured passw_policies). 
- syslog.syslog(syslog.LOG_DEBUG, "modify_conf_file: passw_policies - {}".format(passw_policies)) - - template_passwh_file = os.path.abspath(PAM_PASSWORD_CONF_TEMPLATE) - env = jinja2.Environment(loader=jinja2.FileSystemLoader('/'), trim_blocks=True) - env.filters['sub'] = sub - template_passwh = env.get_template(template_passwh_file) - - # Render common-password file with passw hardening policies if any. Other render without them. - pam_passwh_conf = template_passwh.render(debug=self.debug, passw_policies=passw_policies) - - # Use rename(), which is atomic (on the same fs) to avoid empty file - with open(PAM_PASSWORD_CONF + ".tmp", 'w') as f: - f.write(pam_passwh_conf) - os.chmod(PAM_PASSWORD_CONF + ".tmp", 0o644) - os.rename(PAM_PASSWORD_CONF + ".tmp", PAM_PASSWORD_CONF) - - # Age policy - # When feature disabled or age policy disabled, expiry days policy should be as linux default, other, accoriding CONFIG_DB. - curr_expiration = LINUX_DEFAULT_PASS_MAX_DAYS - curr_expiration_warning = LINUX_DEFAULT_PASS_WARN_AGE - - if passw_policies: - if 'state' in passw_policies: - if passw_policies['state'] == 'enabled': - if 'expiration' in passw_policies: - if int(self.passw_policies['expiration']) != 0: # value '0' meaning age policy is disabled - # the logic is to modify the expiration time according the last updated modificatiion - # - curr_expiration = int(passw_policies['expiration']) - - if 'expiration_warning' in passw_policies: - if int(self.passw_policies['expiration_warning']) != 0: # value '0' meaning age policy is disabled - curr_expiration_warning = int(passw_policies['expiration_warning']) - - if self.is_passwd_aging_expire_update(curr_expiration, 'MAX_DAYS'): - # Set aging policy for existing users - self.passwd_aging_expire_modify(curr_expiration, 'MAX_DAYS') - - # Aging policy for new users - self.modify_single_file_inplace(ETC_LOGIN_DEF, ["\'/^PASS_MAX_DAYS/c\PASS_MAX_DAYS " +str(curr_expiration)+"\'"]) - - if self.is_passwd_aging_expire_update(curr_expiration_warning, 'WARN_DAYS'): - # Aging policy for existing users - self.passwd_aging_expire_modify(curr_expiration_warning, 'WARN_DAYS') - - # Aging policy for new users - self.modify_single_file_inplace(ETC_LOGIN_DEF, ["\'/^PASS_WARN_AGE/c\PASS_WARN_AGE " +str(curr_expiration_warning)+"\'"]) - - def passwd_aging_expire_modify(self, curr_expiration, age_type): - normal_accounts = self.get_normal_accounts() - if not normal_accounts: - syslog.syslog(syslog.LOG_ERR,"failed, no normal users found in /etc/passwd") - return - chage_flag = AGE_DICT[age_type]['CHAGE_FLAG'] - for normal_account in normal_accounts: - try: - chage_p_m = subprocess.Popen(('chage', chage_flag + str(curr_expiration), normal_account), stdout=subprocess.PIPE) - return_code_chage_p_m = chage_p_m.poll() - if return_code_chage_p_m != 0: - syslog.syslog(syslog.LOG_ERR, "failed: return code - {}".format(return_code_chage_p_m)) - - except subprocess.CalledProcessError as e: - syslog.syslog(syslog.LOG_ERR, "{} - failed: return code - {}, output:\n{}".format(e.cmd, e.returncode, e.output)) - - def is_passwd_aging_expire_update(self, curr_expiration, age_type): - """ Function verify that the current age expiry policy values are equal from the old one - Return update_age_status 'True' value meaning that was a modification from the last time, and vice versa. 
- """ - update_age_status = False - days_num = None - regex_days = AGE_DICT[age_type]['REGEX_DAYS'] - days_type = AGE_DICT[age_type]['DAYS'] - if os.path.exists(ETC_LOGIN_DEF): - with open(ETC_LOGIN_DEF, 'r') as f: - login_def_data = f.readlines() - - for line in login_def_data: - m1 = re.match(regex_days, line) - if m1: - days_num = int(m1.group(days_type)) - break - - if curr_expiration != days_num: - update_age_status = True - - return update_age_status - - def get_normal_accounts(self): - # Get user list - try: - getent_out = subprocess.check_output(['getent', 'passwd']).decode('utf-8').split('\n') - except subprocess.CalledProcessError as err: - syslog.syslog(syslog.LOG_ERR, "{} - failed: return code - {}, output:\n{}".format(err.cmd, err.returncode, err.output)) - return False - - # Get range of normal users - REGEX_UID_MAX = r'^UID_MAX[ \t]*(?P\d*)' - REGEX_UID_MIN = r'^UID_MIN[ \t]*(?P\d*)' - uid_max = None - uid_min = None - if os.path.exists(ETC_LOGIN_DEF): - with open(ETC_LOGIN_DEF, 'r') as f: - login_def_data = f.readlines() - - for line in login_def_data: - m1 = re.match(REGEX_UID_MAX, line) - m2 = re.match(REGEX_UID_MIN, line) - if m1: - uid_max = int(m1.group("uid_max")) - if m2: - uid_min = int(m2.group("uid_min")) - - if not uid_max or not uid_min: - syslog.syslog(syslog.LOG_ERR,"failed, no UID_MAX/UID_MIN founded in login.def file") - return False - - # Get normal user list - normal_accounts = [] - for account in getent_out[0:-1]: # last item is always empty - account_spl = account.split(':') - account_number = int(account_spl[2]) - if account_number >= uid_min and account_number <= uid_max: - normal_accounts.append(account_spl[ACCOUNT_NAME]) - - normal_accounts.append('root') # root is also a candidate to be age modify. - return normal_accounts - - def modify_passw_conf_file(self): - passw_policies = self.passw_policies_default.copy() - passw_policies.update(self.passw_policies) - - # set new Password Hardening policies. 
- self.set_passw_hardening_policies(passw_policies) - - -class KdumpCfg(object): - def __init__(self, CfgDb): - self.config_db = CfgDb - self.kdump_defaults = { "enabled" : "false", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M", - "num_dumps": "3" } - - def load(self, kdump_table): - """ - Set the KDUMP table in CFG DB to kdump_defaults if not set by the user - """ - syslog.syslog(syslog.LOG_INFO, "KdumpCfg init ...") - kdump_conf = kdump_table.get("config", {}) - for row in self.kdump_defaults: - value = self.kdump_defaults.get(row) - if not kdump_conf.get(row): - self.config_db.mod_entry("KDUMP", "config", {row : value}) - - def kdump_update(self, key, data): - syslog.syslog(syslog.LOG_INFO, "Kdump global configuration update") - if key == "config": - # Admin mode - kdump_enabled = self.kdump_defaults["enabled"] - if data.get("enabled") is not None: - kdump_enabled = data.get("enabled") - if kdump_enabled.lower() == "true": - enabled = True - else: - enabled = False - if enabled: - run_cmd("sonic-kdump-config --enable") - else: - run_cmd("sonic-kdump-config --disable") - - # Memory configuration - memory = self.kdump_defaults["memory"] - if data.get("memory") is not None: - memory = data.get("memory") - run_cmd("sonic-kdump-config --memory " + memory) - - # Num dumps - num_dumps = self.kdump_defaults["num_dumps"] - if data.get("num_dumps") is not None: - num_dumps = data.get("num_dumps") - run_cmd("sonic-kdump-config --num_dumps " + num_dumps) - -class NtpCfg(object): - """ - NtpCfg Config Daemon - 1) ntp-config.service handles the configuration updates and then starts ntp.service - 2) Both of them start after all the feature services start - 3) Purpose of this daemon is to propagate runtime config changes in - NTP, NTP_SERVER and LOOPBACK_INTERFACE - """ - def __init__(self): - self.ntp_global = {} - self.ntp_servers = set() - - def load(self, ntp_global_conf, ntp_server_conf): - syslog.syslog(syslog.LOG_INFO, "NtpCfg load ...") - - for row in ntp_global_conf: - self.ntp_global_update(row, ntp_global_conf[row], is_load=True) - - # Force reload on init - self.ntp_server_update(0, None, is_load=True) - - def handle_ntp_source_intf_chg(self, intf_name): - # if no ntp server configured, do nothing - if not self.ntp_servers: - return - - # check only the intf configured as source interface - if intf_name not in self.ntp_global.get('src_intf', '').split(';'): - return - else: - # just restart ntp config - cmd = 'systemctl restart ntp-config' - run_cmd(cmd) - - def ntp_global_update(self, key, data, is_load=False): - syslog.syslog(syslog.LOG_INFO, 'NTP GLOBAL Update') - orig_src = self.ntp_global.get('src_intf', '') - orig_src_set = set(orig_src.split(";")) - orig_vrf = self.ntp_global.get('vrf', '') - - new_src = data.get('src_intf', '') - new_src_set = set(new_src.split(";")) - new_vrf = data.get('vrf', '') - - # Update the Local Cache - self.ntp_global = data - - # If initial load don't restart daemon - if is_load: return - - # check if ntp server configured, if not, do nothing - if not self.ntp_servers: - syslog.syslog(syslog.LOG_INFO, "No ntp server when global config change, do nothing") - return - - if orig_src_set != new_src_set: - syslog.syslog(syslog.LOG_INFO, "ntp global update for source intf old {} new {}, restarting ntp-config" - .format(orig_src_set, new_src_set)) - cmd = 'systemctl restart ntp-config' - run_cmd(cmd) - elif new_vrf != orig_vrf: - syslog.syslog(syslog.LOG_INFO, "ntp global update for vrf old {} new {}, restarting ntp service" - .format(orig_vrf, 
new_vrf)) - cmd = 'service ntp restart' - run_cmd(cmd) - - def ntp_server_update(self, key, op, is_load=False): - syslog.syslog(syslog.LOG_INFO, 'ntp server update key {}'.format(key)) - - restart_config = False - if not is_load: - if op == "SET" and key not in self.ntp_servers: - restart_config = True - self.ntp_servers.add(key) - elif op == "DEL" and key in self.ntp_servers: - restart_config = True - self.ntp_servers.remove(key) - else: - restart_config = True - - if restart_config: - cmd = 'systemctl restart ntp-config' - syslog.syslog(syslog.LOG_INFO, 'ntp server update, restarting ntp-config, ntp servers configured {}'.format(self.ntp_servers)) - run_cmd(cmd) - -class PamLimitsCfg(object): - """ - PamLimit Config Daemon - 1) The pam_limits PAM module sets limits on the system resources that can be obtained in a user-session. - 2) Purpose of this daemon is to render pam_limits config file. - """ - def __init__(self, config_db): - self.config_db = config_db - self.hwsku = "" - self.type = "" - - # Load config from ConfigDb and render config file/ - def update_config_file(self): - device_metadata = self.config_db.get_table('DEVICE_METADATA') - if "localhost" not in device_metadata: - return - - self.read_localhost_config(device_metadata["localhost"]) - self.render_conf_file() - - # Read localhost config - def read_localhost_config(self, localhost): - if "hwsku" in localhost: - self.hwsku = localhost["hwsku"] - else: - self.hwsku = "" - - if "type" in localhost: - self.type = localhost["type"] - else: - self.type = "" - - # Render pam_limits config files - def render_conf_file(self): - env = jinja2.Environment(loader=jinja2.FileSystemLoader('/'), trim_blocks=True) - env.filters['sub'] = sub - - try: - template_file = os.path.abspath(PAM_LIMITS_CONF_TEMPLATE) - template = env.get_template(template_file) - pam_limits_conf = template.render( - hwsku=self.hwsku, - type=self.type) - with open(PAM_LIMITS_CONF, 'w') as f: - f.write(pam_limits_conf) - - template_file = os.path.abspath(LIMITS_CONF_TEMPLATE) - template = env.get_template(template_file) - limits_conf = template.render( - hwsku=self.hwsku, - type=self.type) - with open(LIMITS_CONF, 'w') as f: - f.write(limits_conf) - except Exception as e: - syslog.syslog(syslog.LOG_ERR, - "modify pam_limits config file failed with exception: {}" - .format(e)) - -class HostConfigDaemon: - def __init__(self): - # Just a sanity check to verify if the CONFIG_DB has been initialized - # before moving forward - self.config_db = ConfigDBConnector() - self.config_db.connect(wait_for_init=True, retry_on=True) - syslog.syslog(syslog.LOG_INFO, 'ConfigDB connect success') - - # Load DEVICE metadata configurations - self.device_config = {} - self.device_config['DEVICE_METADATA'] = self.config_db.get_table('DEVICE_METADATA') - - # Load feature state table - self.state_db_conn = DBConnector(STATE_DB, 0) - feature_state_table = Table(self.state_db_conn, 'FEATURE') - - # Initialize KDump Config and set the config to default if nothing is provided - self.kdumpCfg = KdumpCfg(self.config_db) - - # Initialize IpTables - self.iptables = Iptables() - - # Intialize Feature Handler - self.feature_handler = FeatureHandler(self.config_db, feature_state_table, self.device_config) - - # Initialize Ntp Config Handler - self.ntpcfg = NtpCfg() - - self.is_multi_npu = device_info.is_multi_npu() - - # Initialize AAACfg - self.hostname_cache="" - self.aaacfg = AaaCfg() - - # Initialize PasswHardening - self.passwcfg = PasswHardening() - - # Initialize PamLimitsCfg - self.pamLimitsCfg 
= PamLimitsCfg(self.config_db) - self.pamLimitsCfg.update_config_file() - - def load(self, init_data): - features = init_data['FEATURE'] - aaa = init_data['AAA'] - tacacs_global = init_data['TACPLUS'] - tacacs_server = init_data['TACPLUS_SERVER'] - radius_global = init_data['RADIUS'] - radius_server = init_data['RADIUS_SERVER'] - lpbk_table = init_data['LOOPBACK_INTERFACE'] - ntp_server = init_data['NTP_SERVER'] - ntp_global = init_data['NTP'] - kdump = init_data['KDUMP'] - passwh = init_data['PASSW_HARDENING'] - - self.feature_handler.sync_state_field(features) - self.aaacfg.load(aaa, tacacs_global, tacacs_server, radius_global, radius_server) - self.iptables.load(lpbk_table) - self.ntpcfg.load(ntp_global, ntp_server) - self.kdumpCfg.load(kdump) - self.passwcfg.load(passwh) - - dev_meta = self.config_db.get_table('DEVICE_METADATA') - if 'localhost' in dev_meta: - if 'hostname' in dev_meta['localhost']: - self.hostname_cache = dev_meta['localhost']['hostname'] - - # Update AAA with the hostname - self.aaacfg.hostname_update(self.hostname_cache) - - def __get_intf_name(self, key): - if isinstance(key, tuple) and key: - intf = key[0] - else: - intf = key - return intf - - def aaa_handler(self, key, op, data): - self.aaacfg.aaa_update(key, data) - syslog.syslog(syslog.LOG_INFO, 'AAA Update: key: {}, op: {}, data: {}'.format(key, op, data)) - - def passwh_handler(self, key, op, data): - self.passwcfg.passw_policies_update(key, data) - syslog.syslog(syslog.LOG_INFO, 'PASSW_HARDENING Update: key: {}, op: {}, data: {}'.format(key, op, data)) - - def tacacs_server_handler(self, key, op, data): - self.aaacfg.tacacs_server_update(key, data) - log_data = copy.deepcopy(data) - if 'passkey' in log_data: - log_data['passkey'] = obfuscate(log_data['passkey']) - syslog.syslog(syslog.LOG_INFO, 'TACPLUS_SERVER update: key: {}, op: {}, data: {}'.format(key, op, log_data)) - - def tacacs_global_handler(self, key, op, data): - self.aaacfg.tacacs_global_update(key, data) - log_data = copy.deepcopy(data) - if 'passkey' in log_data: - log_data['passkey'] = obfuscate(log_data['passkey']) - syslog.syslog(syslog.LOG_INFO, 'TACPLUS Global update: key: {}, op: {}, data: {}'.format(key, op, log_data)) - - def radius_server_handler(self, key, op, data): - self.aaacfg.radius_server_update(key, data) - log_data = copy.deepcopy(data) - if 'passkey' in log_data: - log_data['passkey'] = obfuscate(log_data['passkey']) - syslog.syslog(syslog.LOG_INFO, 'RADIUS_SERVER update: key: {}, op: {}, data: {}'.format(key, op, log_data)) - - def radius_global_handler(self, key, op, data): - self.aaacfg.radius_global_update(key, data) - log_data = copy.deepcopy(data) - if 'passkey' in log_data: - log_data['passkey'] = obfuscate(log_data['passkey']) - syslog.syslog(syslog.LOG_INFO, 'RADIUS Global update: key: {}, op: {}, data: {}'.format(key, op, log_data)) - - def mgmt_intf_handler(self, key, op, data): - key = ConfigDBConnector.deserialize_key(key) - mgmt_intf_name = self.__get_intf_name(key) - self.aaacfg.handle_radius_source_intf_ip_chg(mgmt_intf_name) - self.aaacfg.handle_radius_nas_ip_chg(mgmt_intf_name) - - def lpbk_handler(self, key, op, data): - key = ConfigDBConnector.deserialize_key(key) - if op == "DEL": - add = False - else: - add = True - - self.iptables.iptables_handler(key, data, add) - lpbk_name = self.__get_intf_name(key) - self.ntpcfg.handle_ntp_source_intf_chg(lpbk_name) - self.aaacfg.handle_radius_source_intf_ip_chg(key) - - def vlan_intf_handler(self, key, op, data): - key = ConfigDBConnector.deserialize_key(key) - 
self.aaacfg.handle_radius_source_intf_ip_chg(key) - - def vlan_sub_intf_handler(self, key, op, data): - key = ConfigDBConnector.deserialize_key(key) - self.aaacfg.handle_radius_source_intf_ip_chg(key) - - def portchannel_intf_handler(self, key, op, data): - key = ConfigDBConnector.deserialize_key(key) - self.aaacfg.handle_radius_source_intf_ip_chg(key) - - def phy_intf_handler(self, key, op, data): - key = ConfigDBConnector.deserialize_key(key) - self.aaacfg.handle_radius_source_intf_ip_chg(key) - - def ntp_server_handler(self, key, op, data): - self.ntpcfg.ntp_server_update(key, op) - - def ntp_global_handler(self, key, op, data): - self.ntpcfg.ntp_global_update(key, data) - - def kdump_handler (self, key, op, data): - syslog.syslog(syslog.LOG_INFO, 'Kdump handler...') - self.kdumpCfg.kdump_update(key, data) - - def wait_till_system_init_done(self): - # No need to print the output in the log file so using the "--quiet" - # flag - systemctl_cmd = "sudo systemctl is-system-running --wait --quiet" - subprocess.call(systemctl_cmd, shell=True) - - def register_callbacks(self): - - def make_callback(func): - def callback(table, key, data): - if data is None: - op = "DEL" - else: - op = "SET" - return func(key, op, data) - return callback - - self.config_db.subscribe('KDUMP', make_callback(self.kdump_handler)) - # Handle FEATURE updates before other tables - self.config_db.subscribe('FEATURE', make_callback(self.feature_handler.handler)) - # Handle AAA, TACACS and RADIUS related tables - self.config_db.subscribe('AAA', make_callback(self.aaa_handler)) - self.config_db.subscribe('TACPLUS', make_callback(self.tacacs_global_handler)) - self.config_db.subscribe('TACPLUS_SERVER', make_callback(self.tacacs_server_handler)) - self.config_db.subscribe('RADIUS', make_callback(self.radius_global_handler)) - self.config_db.subscribe('RADIUS_SERVER', make_callback(self.radius_server_handler)) - self.config_db.subscribe('PASSW_HARDENING', make_callback(self.passwh_handler)) - # Handle IPTables configuration - self.config_db.subscribe('LOOPBACK_INTERFACE', make_callback(self.lpbk_handler)) - # Handle NTP & NTP_SERVER updates - self.config_db.subscribe('NTP', make_callback(self.ntp_global_handler)) - self.config_db.subscribe('NTP_SERVER', make_callback(self.ntp_server_handler)) - # Handle updates to src intf changes in radius - self.config_db.subscribe('MGMT_INTERFACE', make_callback(self.mgmt_intf_handler)) - self.config_db.subscribe('VLAN_INTERFACE', make_callback(self.vlan_intf_handler)) - self.config_db.subscribe('VLAN_SUB_INTERFACE', make_callback(self.vlan_sub_intf_handler)) - self.config_db.subscribe('PORTCHANNEL_INTERFACE', make_callback(self.portchannel_intf_handler)) - self.config_db.subscribe('INTERFACE', make_callback(self.phy_intf_handler)) - - syslog.syslog(syslog.LOG_INFO, - "Waiting for systemctl to finish initialization") - self.wait_till_system_init_done() - syslog.syslog(syslog.LOG_INFO, - "systemctl has finished initialization -- proceeding ...") - - def start(self): - self.config_db.listen(init_data_handler=self.load) - - -def main(): - signal.signal(signal.SIGTERM, signal_handler) - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGHUP, signal_handler) - daemon = HostConfigDaemon() - daemon.register_callbacks() - daemon.start() - -if __name__ == "__main__": - main() - diff --git a/src/sonic-host-services/scripts/procdockerstatsd b/src/sonic-host-services/scripts/procdockerstatsd deleted file mode 100755 index da6fa433e3..0000000000 --- 
a/src/sonic-host-services/scripts/procdockerstatsd +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/env python3 -''' -procdockerstatsd -Daemon which periodically gathers process and docker statistics and pushes the data to STATE_DB -''' - -import os -import re -import subprocess -import sys -import time -from datetime import datetime - -from sonic_py_common import daemon_base -from swsscommon import swsscommon - -VERSION = '1.0' - -SYSLOG_IDENTIFIER = "procdockerstatsd" - -REDIS_HOSTIP = "127.0.0.1" - - -class ProcDockerStats(daemon_base.DaemonBase): - - def __init__(self, log_identifier): - super(ProcDockerStats, self).__init__(log_identifier) - self.state_db = swsscommon.SonicV2Connector(host=REDIS_HOSTIP) - self.state_db.connect("STATE_DB") - - def run_command(self, cmd): - proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE) - (stdout, stderr) = proc.communicate() - if proc.returncode != 0: - self.log_error("Error running command '{}'".format(cmd)) - return None - else: - return stdout - - def format_docker_cmd_output(self, cmdout): - lines = cmdout.splitlines() - keys = re.split(" +", lines[0]) - docker_data = dict() - docker_data_list = [] - for line in lines[1:]: - values = re.split(" +", line) - docker_data = {key: value for key, value in zip(keys, values)} - docker_data_list.append(docker_data) - formatted_dict = self.create_docker_dict(docker_data_list) - return formatted_dict - - def format_process_cmd_output(self, cmdout): - lines = cmdout.splitlines() - keys = re.split(" +", lines[0]) - key_list = [key for key in keys if key] - process_data = dict() - process_data_list = [] - for line in lines[1:]: - values = re.split(" +", line) - # To remove extra space before UID - val_list = [val for val in values if val] - # Merging extra columns created due to space in cmd ouput - val_list[8:] = [' '.join(val_list[8:])] - process_data = {key: value for key, value in zip(key_list, val_list)} - process_data_list.append(process_data) - return process_data_list - - def convert_to_bytes(self, value): - UNITS_B = 'B' - UNITS_KB = 'KB' - UNITS_MB = 'MB' - UNITS_MiB = 'MiB' - UNITS_GiB = 'GiB' - - res = re.match(r'(\d+\.?\d*)([a-zA-Z]+)', value) - value = float(res.groups()[0]) - units = res.groups()[1] - if units.lower() == UNITS_KB.lower(): - value *= 1000 - elif units.lower() == UNITS_MB.lower(): - value *= (1000 * 1000) - elif units.lower() == UNITS_MiB.lower(): - value *= (1024 * 1024) - elif units.lower() == UNITS_GiB.lower(): - value *= (1024 * 1024 * 1024) - - return int(round(value)) - - def create_docker_dict(self, dict_list): - dockerdict = {} - for row in dict_list[0:]: - cid = row.get('CONTAINER ID') - if cid: - key = 'DOCKER_STATS|{}'.format(cid) - dockerdict[key] = {} - dockerdict[key]['NAME'] = row.get('NAME') - - cpu = row.get('CPU %').split("%") - dockerdict[key]['CPU%'] = str(cpu[0]) - - memuse = row.get('MEM USAGE / LIMIT').split(" / ") - # converting MiB and GiB to bytes - dockerdict[key]['MEM_BYTES'] = str(self.convert_to_bytes(memuse[0])) - dockerdict[key]['MEM_LIMIT_BYTES'] = str(self.convert_to_bytes(memuse[1])) - - mem = row.get('MEM %').split("%") - dockerdict[key]['MEM%'] = str(mem[0]) - - netio = row.get('NET I/O').split(" / ") - dockerdict[key]['NET_IN_BYTES'] = str(self.convert_to_bytes(netio[0])) - dockerdict[key]['NET_OUT_BYTES'] = str(self.convert_to_bytes(netio[1])) - - blockio = row.get('BLOCK I/O').split(" / ") - dockerdict[key]['BLOCK_IN_BYTES'] = str(self.convert_to_bytes(blockio[0])) - dockerdict[key]['BLOCK_OUT_BYTES'] = 
str(self.convert_to_bytes(blockio[1])) - - dockerdict[key]['PIDS'] = row.get('PIDS') - return dockerdict - - def update_dockerstats_command(self): - cmd = "docker stats --no-stream -a" - data = self.run_command(cmd) - if not data: - self.log_error("'{}' returned null output".format(cmd)) - return False - dockerdata = self.format_docker_cmd_output(data) - if not dockerdata: - self.log_error("formatting for docker output failed") - return False - # wipe out all data from state_db before updating - self.state_db.delete_all_by_pattern('STATE_DB', 'DOCKER_STATS|*') - for k1,v1 in dockerdata.items(): - for k2,v2 in v1.items(): - self.update_state_db(k1, k2, v2) - return True - - def update_processstats_command(self): - data = self.run_command("ps -eo uid,pid,ppid,%mem,%cpu,stime,tty,time,cmd --sort -%cpu | head -1024") - processdata = self.format_process_cmd_output(data) - value = "" - # wipe out all data before updating with new values - self.state_db.delete_all_by_pattern('STATE_DB', 'PROCESS_STATS|*') - for row in processdata[0:]: - cid = row.get('PID') - if cid: - value = 'PROCESS_STATS|{}'.format(cid) - uid = row.get('UID') - self.update_state_db(value, 'UID', uid) - ppid = row.get('PPID') - self.update_state_db(value, 'PPID', ppid) - cpu = row.get('%CPU') - self.update_state_db(value, '%CPU', str(cpu)) - mem = row.get('%MEM') - self.update_state_db(value, '%MEM', str(mem)) - stime = row.get('STIME') - self.update_state_db(value, 'STIME', stime) - tty = row.get('TT') - self.update_state_db(value, 'TT', tty) - time = row.get('TIME') - self.update_state_db(value, 'TIME', time) - cmd = row.get('CMD') - self.update_state_db(value, 'CMD', cmd) - - def update_state_db(self, key1, key2, value2): - self.state_db.set('STATE_DB', key1, key2, value2) - - def run(self): - self.log_info("Starting up ...") - - if not os.getuid() == 0: - self.log_error("Must be root to run this daemon") - print("Must be root to run this daemon") - sys.exit(1) - - while True: - self.update_dockerstats_command() - datetimeobj = datetime.now() - # Adding key to store latest update time. - self.update_state_db('DOCKER_STATS|LastUpdateTime', 'lastupdate', str(datetimeobj)) - self.update_processstats_command() - self.update_state_db('PROCESS_STATS|LastUpdateTime', 'lastupdate', str(datetimeobj)) - - # Data need to be updated every 2 mins. hence adding delay of 120 seconds - time.sleep(120) - - self.log_info("Exiting ...") - - -def main(): - # Instantiate a ProcDockerStats object - pd = ProcDockerStats(SYSLOG_IDENTIFIER) - - # Log all messages from INFO level and higher - pd.set_min_log_priority_info() - - pd.run() - - -if __name__ == '__main__': - main() diff --git a/src/sonic-host-services/scripts/process-reboot-cause b/src/sonic-host-services/scripts/process-reboot-cause deleted file mode 100755 index df43a131fa..0000000000 --- a/src/sonic-host-services/scripts/process-reboot-cause +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python3 -# -# process-reboot-cause -# -# Program designed to read the previous reboot-cause files, log the last previous reboot-cause. -# And read the saved reboot-cause history files and save the reboot cause in the state-db. 
-#
-
-try:
-    import json
-    import os
-    import pwd
-    import sys
-
-    from swsscommon import swsscommon
-    from sonic_py_common import logger
-except ImportError as err:
-    raise ImportError("%s - required module not found" % str(err))
-
-VERSION = "1.0"
-
-SYSLOG_IDENTIFIER = "process-reboot-cause"
-
-REBOOT_CAUSE_DIR = "/host/reboot-cause/"
-REBOOT_CAUSE_HISTORY_DIR = "/host/reboot-cause/history/"
-PREVIOUS_REBOOT_CAUSE_FILE = os.path.join(REBOOT_CAUSE_DIR, "previous-reboot-cause.json")
-USER_ISSUED_REBOOT_CAUSE_REGEX ="User issued \'{}\' command [User: {}, Time: {}]"
-
-REBOOT_CAUSE_UNKNOWN = "Unknown"
-REBOOT_CAUSE_TABLE_NAME = "REBOOT_CAUSE"
-
-REDIS_HOSTIP = "127.0.0.1"
-state_db = None
-
-# Global logger class instance
-sonic_logger = logger.Logger(SYSLOG_IDENTIFIER)
-
-
-# ============================= Functions =============================
-def read_reboot_cause_files_and_save_state_db():
-    # Connect State DB
-    state_db = swsscommon.SonicV2Connector(host=REDIS_HOSTIP)
-    state_db.connect(state_db.STATE_DB)
-
-    # Sort the previous reboot cause files by creation time
-    REBOOT_FILE_LIST = [os.path.join(REBOOT_CAUSE_HISTORY_DIR, i) for i in os.listdir(REBOOT_CAUSE_HISTORY_DIR)]
-    TIME_SORTED_FULL_REBOOT_FILE_LIST = sorted(REBOOT_FILE_LIST, key=os.path.getmtime, reverse=True)
-
-    data = []
-    # Read each sorted previous reboot cause file and update the state db with previous reboot cause information
-    for i in range(min(10, len(TIME_SORTED_FULL_REBOOT_FILE_LIST))):
-        x = TIME_SORTED_FULL_REBOOT_FILE_LIST[i]
-        if os.path.isfile(x):
-            with open(x, "r") as cause_file:
-                data = json.load(cause_file)
-                _hash = '{}|{}'.format(REBOOT_CAUSE_TABLE_NAME, data['gen_time'])
-                state_db.set(state_db.STATE_DB, _hash, 'cause', data['cause'])
-                state_db.set(state_db.STATE_DB, _hash, 'time', data['time'])
-                state_db.set(state_db.STATE_DB, _hash, 'user', data['user'])
-                state_db.set(state_db.STATE_DB, _hash, 'comment', data['comment'])
-
-    if len(TIME_SORTED_FULL_REBOOT_FILE_LIST) > 10:
-        for i in range(len(TIME_SORTED_FULL_REBOOT_FILE_LIST)):
-            if i >= 10:
-                x = TIME_SORTED_FULL_REBOOT_FILE_LIST[i]
-                os.remove(x)
-
-
-def main():
-    # Configure logger to log all messages INFO level and higher
-    sonic_logger.set_min_log_priority_info()
-
-    sonic_logger.log_info("Starting up...")
-
-    if not os.geteuid() == 0:
-        sonic_logger.log_error("User {} does not have permission to execute".format(pwd.getpwuid(os.getuid()).pw_name))
-        sys.exit("This utility must be run as root")
-
-    # Set a default previous reboot cause
-    previous_reboot_cause = REBOOT_CAUSE_UNKNOWN
-
-    # Read the most recent reboot cause file and log data to syslog
-    if os.path.exists(PREVIOUS_REBOOT_CAUSE_FILE):
-        with open(PREVIOUS_REBOOT_CAUSE_FILE, "r") as last_cause_file:
-            data = json.load(last_cause_file)
-            if data['user']:
-                previous_reboot_cause = USER_ISSUED_REBOOT_CAUSE_REGEX.format(data['cause'], data['user'], data['time'])
-            else:
-                previous_reboot_cause = "{}".format(data['cause'])
-
-    # Log the last reboot cause to the syslog
-    sonic_logger.log_info("Previous reboot cause: {}".format(previous_reboot_cause))
-
-    if os.path.exists(REBOOT_CAUSE_HISTORY_DIR):
-        # Read the previous reboot cause from saved reboot-cause files and save the previous reboot cause upto 10 entry to the state db
-        read_reboot_cause_files_and_save_state_db()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/src/sonic-host-services/scripts/sonic-host-server b/src/sonic-host-services/scripts/sonic-host-server
deleted file mode 100755
index bf4449e34e..0000000000
---
a/src/sonic-host-services/scripts/sonic-host-server +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python3 -"""Host Service to handle docker-to-host communication""" - -import os -import os.path -import glob -import importlib -import sys - -import dbus -import dbus.service -import dbus.mainloop.glib - -from gi.repository import GObject - -def find_module_path(): - """Find path for host_moduels""" - try: - from host_modules import host_service - return os.path.dirname(host_service.__file__) - except ImportError as e: - return None - -def register_modules(mod_path): - """Register all host modules""" - sys.path.append(mod_path) - for mod_file in glob.glob(os.path.join(mod_path, '*.py')): - if os.path.isfile(mod_file) and not mod_file.endswith('__init__.py'): - mod_name = os.path.basename(mod_file)[:-3] - module = importlib.import_module(mod_name) - - register_cb = getattr(module, 'register', None) - if not register_cb: - raise Exception('Missing register function for ' + mod_name) - - register_dbus(register_cb) - -def register_dbus(register_cb): - """Register DBus handlers for individual modules""" - handler_class, mod_name = register_cb() - handlers[mod_name] = handler_class(mod_name) - -# Create a main loop reactor -GObject.threads_init() -dbus.mainloop.glib.threads_init() -dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) -loop = GObject.MainLoop() -handlers = {} - -class SignalManager(object): - ''' This is used to manage signals received (e.g. SIGINT). - When stopping a process (systemctl stop [service]), systemd sends - a SIGTERM signal. - ''' - shutdown = False - def __init__(self): - ''' Install signal handlers. - - SIGTERM is invoked when systemd wants to stop the daemon. - For example, "systemctl stop mydaemon.service" - or, "systemctl restart mydaemon.service" - - ''' - import signal - signal.signal(signal.SIGTERM, self.sigterm_hdlr) - - def sigterm_hdlr(self, _signum, _frame): - self.shutdown = True - loop.quit() - -sigmgr = SignalManager() -mod_path = find_module_path() -if mod_path is not None: - register_modules(mod_path) - -# Only run if we actually have some handlers -if handlers: - import systemd.daemon - systemd.daemon.notify("READY=1") - - while not sigmgr.shutdown: - loop.run() - if sigmgr.shutdown: - break - - systemd.daemon.notify("STOPPING=1") -else: - print("No handlers to register, quitting...") diff --git a/src/sonic-host-services/setup.cfg b/src/sonic-host-services/setup.cfg deleted file mode 100644 index b7e478982c..0000000000 --- a/src/sonic-host-services/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[aliases] -test=pytest diff --git a/src/sonic-host-services/setup.py b/src/sonic-host-services/setup.py deleted file mode 100644 index 9ed9e1082a..0000000000 --- a/src/sonic-host-services/setup.py +++ /dev/null @@ -1,57 +0,0 @@ -from setuptools import setup - -setup( - name = 'sonic-host-services', - version = '1.0', - description = 'Python services which run in the SONiC host OS', - license = 'Apache 2.0', - author = 'SONiC Team', - author_email = 'linuxnetdev@microsoft.com', - url = 'https://github.com/Azure/sonic-buildimage', - maintainer = 'Joe LeVeque', - maintainer_email = 'jolevequ@microsoft.com', - packages = [ - 'host_modules' - ], - scripts = [ - 'scripts/caclmgrd', - 'scripts/hostcfgd', - 'scripts/aaastatsd', - 'scripts/procdockerstatsd', - 'scripts/determine-reboot-cause', - 'scripts/process-reboot-cause', - 'scripts/sonic-host-server' - ], - install_requires = [ - 'dbus-python', - 'systemd-python', - 'Jinja2>=2.10', - 'PyGObject', - 'sonic-py-common' - ], 
- setup_requires = [ - 'pytest-runner', - 'wheel' - ], - tests_require = [ - 'parameterized', - 'pytest', - 'pyfakefs', - 'sonic-py-common', - 'deepdiff' - ], - classifiers = [ - 'Development Status :: 3 - Alpha', - 'Environment :: Console', - 'Intended Audience :: Developers', - 'Intended Audience :: Information Technology', - 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: Apache Software License', - 'Natural Language :: English', - 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 3.7', - 'Topic :: System', - ], - keywords = 'sonic SONiC host services', - test_suite = 'setup.get_test_suite' -) diff --git a/src/sonic-host-services/tests/__init__.py b/src/sonic-host-services/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/sonic-host-services/tests/caclmgrd/__init__.py b/src/sonic-host-services/tests/caclmgrd/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/sonic-host-services/tests/caclmgrd/caclmgrd_bfd_test.py b/src/sonic-host-services/tests/caclmgrd/caclmgrd_bfd_test.py deleted file mode 100644 index 358d4c413b..0000000000 --- a/src/sonic-host-services/tests/caclmgrd/caclmgrd_bfd_test.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import sys -import swsscommon - -from parameterized import parameterized -from sonic_py_common.general import load_module_from_source -from unittest import TestCase, mock -from pyfakefs.fake_filesystem_unittest import patchfs - -from .test_bfd_vectors import CACLMGRD_BFD_TEST_VECTOR -from tests.common.mock_configdb import MockConfigDb -from unittest.mock import MagicMock, patch - -DBCONFIG_PATH = '/var/run/redis/sonic-db/database_config.json' - -class TestCaclmgrdBfd(TestCase): - """ - Test caclmgrd bfd - """ - def setUp(self): - swsscommon.swsscommon.ConfigDBConnector = MockConfigDb - test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - modules_path = os.path.dirname(test_path) - scripts_path = os.path.join(modules_path, "scripts") - sys.path.insert(0, modules_path) - caclmgrd_path = os.path.join(scripts_path, 'caclmgrd') - self.caclmgrd = load_module_from_source('caclmgrd', caclmgrd_path) - - @parameterized.expand(CACLMGRD_BFD_TEST_VECTOR) - @patchfs - def test_caclmgrd_bfd(self, test_name, test_data, fs): - if not os.path.exists(DBCONFIG_PATH): - fs.create_file(DBCONFIG_PATH) # fake database_config.json - - MockConfigDb.set_config_db(test_data["config_db"]) - - with mock.patch("caclmgrd.subprocess") as mocked_subprocess: - popen_mock = mock.Mock() - popen_attrs = test_data["popen_attributes"] - popen_mock.configure_mock(**popen_attrs) - mocked_subprocess.Popen.return_value = popen_mock - mocked_subprocess.PIPE = -1 - - call_rc = test_data["call_rc"] - mocked_subprocess.call.return_value = call_rc - - caclmgrd_daemon = self.caclmgrd.ControlPlaneAclManager("caclmgrd") - caclmgrd_daemon.allow_bfd_protocol('') - mocked_subprocess.Popen.assert_has_calls(test_data["expected_subprocess_calls"], any_order=True) - diff --git a/src/sonic-host-services/tests/caclmgrd/caclmgrd_dhcp_test.py b/src/sonic-host-services/tests/caclmgrd/caclmgrd_dhcp_test.py deleted file mode 100644 index a6eae7ba12..0000000000 --- a/src/sonic-host-services/tests/caclmgrd/caclmgrd_dhcp_test.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import sys -import swsscommon - -from parameterized import parameterized -from sonic_py_common.general import load_module_from_source -from unittest import TestCase, mock -from pyfakefs.fake_filesystem_unittest 
import patchfs - -from .test_dhcp_vectors import CACLMGRD_DHCP_TEST_VECTOR -from tests.common.mock_configdb import MockConfigDb - -DBCONFIG_PATH = '/var/run/redis/sonic-db/database_config.json' - -class TestCaclmgrdDhcp(TestCase): - """ - Test caclmgrd dhcp - """ - def setUp(self): - swsscommon.ConfigDBConnector = MockConfigDb - test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - modules_path = os.path.dirname(test_path) - scripts_path = os.path.join(modules_path, "scripts") - sys.path.insert(0, modules_path) - caclmgrd_path = os.path.join(scripts_path, 'caclmgrd') - self.caclmgrd = load_module_from_source('caclmgrd', caclmgrd_path) - - @parameterized.expand(CACLMGRD_DHCP_TEST_VECTOR) - @patchfs - def test_caclmgrd_dhcp(self, test_name, test_data, fs): - if not os.path.exists(DBCONFIG_PATH): - fs.create_file(DBCONFIG_PATH) # fake database_config.json - - MockConfigDb.set_config_db(test_data["config_db"]) - - with mock.patch("caclmgrd.subprocess") as mocked_subprocess: - popen_mock = mock.Mock() - popen_attrs = test_data["popen_attributes"] - popen_mock.configure_mock(**popen_attrs) - mocked_subprocess.Popen.return_value = popen_mock - - call_rc = test_data["call_rc"] - mocked_subprocess.call.return_value = call_rc - - mark = test_data["mark"] - - caclmgrd_daemon = self.caclmgrd.ControlPlaneAclManager("caclmgrd") - mux_update = test_data["mux_update"] - - for key,data in mux_update: - caclmgrd_daemon.update_dhcp_acl(key, '', data, mark) - - mocked_subprocess.call.assert_has_calls(test_data["expected_subprocess_calls"], any_order=False) diff --git a/src/sonic-host-services/tests/caclmgrd/caclmgrd_feature_test.py b/src/sonic-host-services/tests/caclmgrd/caclmgrd_feature_test.py deleted file mode 100644 index b162ed01b5..0000000000 --- a/src/sonic-host-services/tests/caclmgrd/caclmgrd_feature_test.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import sys -import swsscommon - -from parameterized import parameterized -from sonic_py_common.general import load_module_from_source -from unittest import TestCase, mock -from pyfakefs.fake_filesystem_unittest import patchfs - -from .test_bfd_vectors import CACLMGRD_BFD_TEST_VECTOR -from tests.common.mock_configdb import MockConfigDb -from unittest.mock import MagicMock, patch - -DBCONFIG_PATH = '/var/run/redis/sonic-db/database_config.json' - -class TestFeature(TestCase): - """ - Test caclmgrd feature present - """ - def setUp(self): - swsscommon.swsscommon.ConfigDBConnector = MockConfigDb - test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - modules_path = os.path.dirname(test_path) - scripts_path = os.path.join(modules_path, "scripts") - sys.path.insert(0, modules_path) - caclmgrd_path = os.path.join(scripts_path, 'caclmgrd') - self.caclmgrd = load_module_from_source('caclmgrd', caclmgrd_path) - - @parameterized.expand(CACLMGRD_BFD_TEST_VECTOR) - @patchfs - def test_feature_present(self, test_name, test_data, fs): - if not os.path.exists(DBCONFIG_PATH): - fs.create_file(DBCONFIG_PATH) # fake database_config.json - - MockConfigDb.set_config_db(test_data["config_db"]) - - with mock.patch("caclmgrd.subprocess") as mocked_subprocess: - popen_mock = mock.Mock() - popen_attrs = test_data["popen_attributes"] - popen_mock.configure_mock(**popen_attrs) - mocked_subprocess.Popen.return_value = popen_mock - mocked_subprocess.PIPE = -1 - - call_rc = test_data["call_rc"] - mocked_subprocess.call.return_value = call_rc - - caclmgrd_daemon = self.caclmgrd.ControlPlaneAclManager("caclmgrd") - 
caclmgrd_daemon.update_feature_present() - self.assertTrue("bgp" in caclmgrd_daemon.feature_present) - self.assertEqual(caclmgrd_daemon.feature_present["bgp"], True) diff --git a/src/sonic-host-services/tests/caclmgrd/test_bfd_vectors.py b/src/sonic-host-services/tests/caclmgrd/test_bfd_vectors.py deleted file mode 100644 index f746937633..0000000000 --- a/src/sonic-host-services/tests/caclmgrd/test_bfd_vectors.py +++ /dev/null @@ -1,35 +0,0 @@ -from unittest.mock import call -import subprocess - -""" - caclmgrd bfd test vector -""" -CACLMGRD_BFD_TEST_VECTOR = [ - [ - "BFD_SESSION_TEST", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - "bgp": { - "auto_restart": "enabled", - "state": "enabled", - } - }, - }, - "expected_subprocess_calls": [ - call("iptables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT", shell=True, universal_newlines=True, stdout=subprocess.PIPE), - call("ip6tables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT", shell=True, universal_newlines=True, stdout=subprocess.PIPE) - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 0, - } - ] -] diff --git a/src/sonic-host-services/tests/caclmgrd/test_dhcp_vectors.py b/src/sonic-host-services/tests/caclmgrd/test_dhcp_vectors.py deleted file mode 100644 index 242faae34d..0000000000 --- a/src/sonic-host-services/tests/caclmgrd/test_dhcp_vectors.py +++ /dev/null @@ -1,340 +0,0 @@ -from unittest.mock import call - -""" - caclmgrd dhcp test vector -""" -CACLMGRD_DHCP_TEST_VECTOR = [ - [ - "Active_Present_Interface", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "active"}), - ("Ethernet8", {"state": "active"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --delete DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - call("iptables --delete DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 0, - "mark": None, - }, - ], - [ - "Active_Present_Mark", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "active"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True), - call("iptables --delete DHCP -m mark --mark 0x67004 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 0, - "mark": "0x67004", - }, - ], - [ - "Active_Absent_Interface", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "active"}), - ("Ethernet8", {"state": "active"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 1, - "mark": None, - 
}, - ], - [ - "Active_Absent_Mark", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "active"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 1, - "mark": "0x67004", - }, - ], - [ - "Standby_Present_Interface", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "standby"}), - ("Ethernet8", {"state": "standby"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 0, - "mark": None, - }, - ], - [ - "Standby_Present_Mark", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "standby"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 0, - "mark": "0x67004", - }, - ], - [ - "Standby_Absent_Interface", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "standby"}), - ("Ethernet8", {"state": "standby"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --insert DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - call("iptables --insert DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 1, - "mark": None, - }, - ], - [ - "Standby_Absent_Mark", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "standby"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True), - call("iptables --insert DHCP -m mark --mark 0x67004 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 1, - "mark": "0x67004", - }, - ], - [ - "Unknown_Present_Interface", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "unknown"}), - ("Ethernet8", {"state": "unknown"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --delete DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - call("iptables --delete DHCP -m physdev --physdev-in Ethernet8 -j DROP", 
shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 0, - "mark": None, - }, - ], - [ - "Unknown_Present_Mark", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "unknown"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True), - call("iptables --delete DHCP -m mark --mark 0x67004 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 0, - "mark": "0x67004", - }, - ], - [ - "Uknown_Absent_Interface", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "unknown"}), - ("Ethernet8", {"state": "unknown"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True), - call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 1, - "mark": None, - }, - ], - [ - "Uknown_Absent_Mark", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "FEATURE": { - }, - }, - "mux_update": [ - ("Ethernet4", {"state": "unknown"}), - ], - "expected_subprocess_calls": [ - call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error'), - }, - "call_rc": 1, - "mark": "0x67004", - }, - ], -] diff --git a/src/sonic-host-services/tests/common/mock_configdb.py b/src/sonic-host-services/tests/common/mock_configdb.py deleted file mode 100644 index f0b12b11ab..0000000000 --- a/src/sonic-host-services/tests/common/mock_configdb.py +++ /dev/null @@ -1,58 +0,0 @@ -class MockConfigDb(object): - """ - Mock Config DB which responds to data tables requests and store updates to the data table - """ - STATE_DB = None - CONFIG_DB = None - event_queue = [] - - def __init__(self, **kwargs): - self.handlers = {} - - @staticmethod - def set_config_db(test_config_db): - MockConfigDb.CONFIG_DB = test_config_db - - @staticmethod - def deserialize_key(key, separator="|"): - tokens = key.split(separator) - if len(tokens) > 1: - return tuple(tokens) - else: - return key - - @staticmethod - def get_config_db(): - return MockConfigDb.CONFIG_DB - - def connect(self, wait_for_init=True, retry_on=True): - pass - - def get(self, db_id, key, field): - return MockConfigDb.CONFIG_DB[key][field] - - def get_entry(self, key, field): - return MockConfigDb.CONFIG_DB[key][field] - - def mod_entry(self, key, field, data): - existing_data = self.get_entry(key, field) - existing_data.update(data) - self.set_entry(key, field, existing_data) - - def set_entry(self, key, field, data): - MockConfigDb.CONFIG_DB[key][field] = data - - def get_table(self, table_name): - return MockConfigDb.CONFIG_DB[table_name] - - def subscribe(self, table_name, callback): - self.handlers[table_name] = callback - - def listen(self, init_data_handler=None): - for e in MockConfigDb.event_queue: - self.handlers[e[0]](e[0], e[1], self.get_entry(e[0], e[1])) - - -class MockDBConnector(): - def __init__(self, db, val): - pass diff --git a/src/sonic-host-services/tests/determine-reboot-cause_test.py 
b/src/sonic-host-services/tests/determine-reboot-cause_test.py deleted file mode 100644 index 7d22a512f8..0000000000 --- a/src/sonic-host-services/tests/determine-reboot-cause_test.py +++ /dev/null @@ -1,119 +0,0 @@ -import sys -import os -import pytest - -from swsscommon import swsscommon -from sonic_py_common.general import load_module_from_source - -# TODO: Remove this if/else block once we no longer support Python 2 -if sys.version_info.major == 3: - from unittest import mock -else: - # Expect the 'mock' package for python 2 - # https://pypi.python.org/pypi/mock - import mock - -# TODO: Remove this if/else block once we no longer support Python 2 -if sys.version_info.major == 3: - BUILTINS = "builtins" -else: - BUILTINS = "__builtin__" - -from .mock_connector import MockConnector - -swsscommon.SonicV2Connector = MockConnector - -test_path = os.path.dirname(os.path.abspath(__file__)) -modules_path = os.path.dirname(test_path) -scripts_path = os.path.join(modules_path, "scripts") -sys.path.insert(0, modules_path) - -# Load the file under test -determine_reboot_cause_path = os.path.join(scripts_path, 'determine-reboot-cause') -determine_reboot_cause = load_module_from_source('determine_reboot_cause', determine_reboot_cause_path) - - -PROC_CMDLINE_CONTENTS = """\ -BOOT_IMAGE=/image-20191130.52/boot/vmlinuz-4.9.0-11-2-amd64 root=/dev/sda4 rw console=tty0 console=ttyS1,9600n8 quiet net.ifnames=0 biosdevname=0 loop=image-20191130.52/fs.squashfs loopfstype=squashfs apparmor=1 security=apparmor varlog_size=4096 usbcore.autosuspend=-1 module_blacklist=gpio_ich SONIC_BOOT_TYPE=warm""" - -EXPECTED_PARSE_WARMFAST_REBOOT_FROM_PROC_CMDLINE = "warm" - -PROC_CMDLINE_CONTENTS = """\ -BOOT_IMAGE=/image-20191130.52/boot/vmlinuz-4.9.0-11-2-amd64 root=/dev/sda4 rw console=tty0 console=ttyS1,9600n8 quiet net.ifnames=0 biosdevname=0 loop=image-20191130.52/fs.squashfs loopfstype=squashfs apparmor=1 security=apparmor varlog_size=4096 usbcore.autosuspend=-1 module_blacklist=gpio_ich SONIC_BOOT_TYPE=warm""" - -REBOOT_CAUSE_CONTENTS = """\ -User issued 'warm-reboot' command [User: admin, Time: Mon Nov 2 22:37:45 UTC 2020]""" - -GET_SONIC_VERSION_INFO = {'commit_id': 'e59ec8291', 'build_date': 'Mon Nov 2 06:00:14 UTC 2020', 'build_number': 75, 'kernel_version': '4.9.0-11-2-amd64', 'debian_version': '9.13', 'built_by': 'sonicbld@jenkins-slave-phx-2', 'asic_type': 'mellanox', 'build_version': '20191130.52'} - -REBOOT_CAUSE_WATCHDOG = "Watchdog" -GEN_TIME_WATCHDOG = "2020_10_22_03_15_08" -REBOOT_CAUSE_USER = "User issued 'reboot' command [User: admin, Time: Thu Oct 22 03:11:08 UTC 2020]" -GEN_TIME_USER = "2020_10_22_03_14_07" -REBOOT_CAUSE_KERNEL_PANIC = "Kernel Panic [Time: Sun Mar 28 13:45:12 UTC 2021]" -GEN_TIME_KERNEL_PANIC = "2021_3_28_13_48_49" - - -EXPECTED_PARSE_WARMFAST_REBOOT_FROM_PROC_CMDLINE = "warm-reboot" -EXPECTED_FIND_SOFTWARE_REBOOT_CAUSE_USER = "User issued 'warm-reboot' command [User: admin, Time: Mon Nov 2 22:37:45 UTC 2020]" -EXPECTED_FIND_FIRSTBOOT_VERSION = " (First boot of SONiC version 20191130.52)" -EXPECTED_FIND_SOFTWARE_REBOOT_CAUSE_FIRSTBOOT = "Unknown (First boot of SONiC version 20191130.52)" -EXPECTED_HARDWARE_REBOOT_CAUSE = {"warm-reboot", ""} - -EXPECTED_WATCHDOG_REBOOT_CAUSE_DICT = {'comment': '', 'gen_time': '2020_10_22_03_15_08', 'cause': 'Watchdog', 'user': 'N/A', 'time': 'N/A'} -EXPECTED_USER_REBOOT_CAUSE_DICT = {'comment': '', 'gen_time': '2020_10_22_03_14_07', 'cause': 'reboot', 'user': 'admin', 'time': 'Thu Oct 22 03:11:08 UTC 2020'} -EXPECTED_KERNEL_PANIC_REBOOT_CAUSE_DICT = 
{'comment': '', 'gen_time': '2021_3_28_13_48_49', 'cause': 'Kernel Panic', 'user': 'N/A', 'time': 'Sun Mar 28 13:45:12 UTC 2021'} - - -class TestDetermineRebootCause(object): - def test_parse_warmfast_reboot_from_proc_cmdline(self): - with mock.patch("os.path.isfile") as mock_isfile: - mock_isfile.return_value = True - open_mocked = mock.mock_open(read_data=PROC_CMDLINE_CONTENTS) - with mock.patch("{}.open".format(BUILTINS), open_mocked): - result = determine_reboot_cause.parse_warmfast_reboot_from_proc_cmdline() - assert result == EXPECTED_PARSE_WARMFAST_REBOOT_FROM_PROC_CMDLINE - open_mocked.assert_called_once_with("/proc/cmdline") - - def test_find_software_reboot_cause_user(self): - with mock.patch("os.path.isfile") as mock_isfile: - mock_isfile.return_value = True - open_mocked = mock.mock_open(read_data=REBOOT_CAUSE_CONTENTS) - with mock.patch("{}.open".format(BUILTINS), open_mocked): - result = determine_reboot_cause.find_software_reboot_cause_from_reboot_cause_file() - assert result == EXPECTED_FIND_SOFTWARE_REBOOT_CAUSE_USER - open_mocked.assert_called_once_with("/host/reboot-cause/reboot-cause.txt") - - def test_find_software_reboot_cause_first_boot(self): - with mock.patch("sonic_py_common.device_info.get_sonic_version_info", return_value=GET_SONIC_VERSION_INFO): - result = determine_reboot_cause.find_first_boot_version() - assert result == EXPECTED_FIND_FIRSTBOOT_VERSION - - def test_find_software_reboot_cause(self): - with mock.patch("determine_reboot_cause.find_software_reboot_cause_from_reboot_cause_file", return_value="Unknown"): - with mock.patch("os.path.isfile") as mock_isfile: - mock_isfile.return_value = False - result = determine_reboot_cause.find_software_reboot_cause() - assert result == "Unknown" - - def test_find_proc_cmdline_reboot_cause(self): - with mock.patch("determine_reboot_cause.parse_warmfast_reboot_from_proc_cmdline", return_value="fast-reboot"): - result = determine_reboot_cause.find_proc_cmdline_reboot_cause() - assert result == "fast-reboot" - - def test_find_hardware_reboot_cause(self): - with mock.patch("determine_reboot_cause.get_reboot_cause_from_platform", return_value=("Powerloss", None)): - result = determine_reboot_cause.find_hardware_reboot_cause() - assert result == "Powerloss (None)" - - def test_get_reboot_cause_dict_watchdog(self): - reboot_cause_dict = determine_reboot_cause.get_reboot_cause_dict(REBOOT_CAUSE_WATCHDOG, "", GEN_TIME_WATCHDOG) - assert reboot_cause_dict == EXPECTED_WATCHDOG_REBOOT_CAUSE_DICT - - def test_get_reboot_cause_dict_user(self): - reboot_cause_dict = determine_reboot_cause.get_reboot_cause_dict(REBOOT_CAUSE_USER, "", GEN_TIME_USER) - assert reboot_cause_dict == EXPECTED_USER_REBOOT_CAUSE_DICT - - def test_get_reboot_cause_dict_kernel_panic(self): - reboot_cause_dict = determine_reboot_cause.get_reboot_cause_dict(REBOOT_CAUSE_KERNEL_PANIC, "", GEN_TIME_KERNEL_PANIC) - assert reboot_cause_dict == EXPECTED_KERNEL_PANIC_REBOOT_CAUSE_DICT diff --git a/src/sonic-host-services/tests/hostcfgd/__init__.py b/src/sonic-host-services/tests/hostcfgd/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/sonic-host-services/tests/hostcfgd/hostcfgd_passwh_test.py b/src/sonic-host-services/tests/hostcfgd/hostcfgd_passwh_test.py deleted file mode 100755 index da68ad8612..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/hostcfgd_passwh_test.py +++ /dev/null @@ -1,182 +0,0 @@ -import importlib.machinery -import importlib.util -import filecmp -import shutil -import os -import sys -import subprocess 
-import re - -from parameterized import parameterized -from unittest import TestCase, mock -from tests.hostcfgd.test_passwh_vectors import HOSTCFGD_TEST_PASSWH_VECTOR -from tests.common.mock_configdb import MockConfigDb, MockDBConnector - -test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -modules_path = os.path.dirname(test_path) -scripts_path = os.path.join(modules_path, "scripts") -src_path = os.path.dirname(modules_path) -templates_path = os.path.join(src_path, "sonic-host-services-data/templates") -output_path = os.path.join(test_path, "hostcfgd/output") -sample_output_path = os.path.join(test_path, "hostcfgd/sample_output") -sys.path.insert(0, modules_path) - -# Load the file under test -hostcfgd_path = os.path.join(scripts_path, 'hostcfgd') -loader = importlib.machinery.SourceFileLoader('hostcfgd', hostcfgd_path) -spec = importlib.util.spec_from_loader(loader.name, loader) -hostcfgd = importlib.util.module_from_spec(spec) -loader.exec_module(hostcfgd) -sys.modules['hostcfgd'] = hostcfgd - -# Mock swsscommon classes -hostcfgd.ConfigDBConnector = MockConfigDb -hostcfgd.DBConnector = MockDBConnector -hostcfgd.Table = mock.Mock() - -AGE_DICT = { 'MAX_DAYS': {'REGEX_DAYS': r'^PASS_MAX_DAYS[ \t]*(?P\d*)', 'DAYS': 'max_days', 'CHAGE_FLAG': '-M '}, - 'WARN_DAYS': {'REGEX_DAYS': r'^PASS_WARN_AGE[ \t]*(?P\d*)', 'DAYS': 'warn_days', 'CHAGE_FLAG': '-W '} - } - -class TestHostcfgdPASSWH(TestCase): - """ - Test hostcfd daemon - PASSWH - """ - def run_diff(self, file1, file2): - try: - diff_out = subprocess.check_output('diff -ur {} {} || true'.format(file1, file2), shell=True) - return diff_out - except subprocess.CalledProcessError as err: - syslog.syslog(syslog.LOG_ERR, "{} - failed: return code - {}, output:\n{}".format(err.cmd, err.returncode, err.output)) - return -1 - - def get_passw_days(self, login_file, age_type): - days_num = -1 - - regex_days = AGE_DICT[age_type]['REGEX_DAYS'] - days_type = AGE_DICT[age_type]['DAYS'] - - with open(login_file, 'r') as f: - login_def_data = f.readlines() - - for line in login_def_data: - m1 = re.match(regex_days, line) - if m1: - days_num = int(m1.group(days_type)) - break - return days_num - - """ - Check different config - """ - def check_config(self, test_name, test_data, config_name): - t_path = templates_path - op_path = output_path + "/" + test_name + "_" + config_name - sop_path = sample_output_path + "/" + test_name + "_" + config_name - sop_path_common = sample_output_path + "/" + test_name - - hostcfgd.PAM_PASSWORD_CONF_TEMPLATE = t_path + "/common-password.j2" - hostcfgd.PAM_AUTH_CONF_TEMPLATE = t_path + "/common-auth-sonic.j2" - hostcfgd.NSS_TACPLUS_CONF_TEMPLATE = t_path + "/tacplus_nss.conf.j2" - hostcfgd.NSS_RADIUS_CONF_TEMPLATE = t_path + "/radius_nss.conf.j2" - hostcfgd.PAM_RADIUS_AUTH_CONF_TEMPLATE = t_path + "/pam_radius_auth.conf.j2" - hostcfgd.PAM_PASSWORD_CONF = op_path + "/common-password" - hostcfgd.ETC_LOGIN_DEF = op_path + "/login.defs" - hostcfgd.PAM_AUTH_CONF = op_path + "/common-auth-sonic" - hostcfgd.NSS_TACPLUS_CONF = op_path + "/tacplus_nss.conf" - hostcfgd.NSS_RADIUS_CONF = op_path + "/radius_nss.conf" - hostcfgd.NSS_CONF = op_path + "/nsswitch.conf" - hostcfgd.ETC_PAMD_SSHD = op_path + "/sshd" - hostcfgd.ETC_PAMD_LOGIN = op_path + "/login" - hostcfgd.RADIUS_PAM_AUTH_CONF_DIR = op_path + "/" - - shutil.rmtree(op_path, ignore_errors=True) - os.mkdir(op_path) - - shutil.copyfile(sop_path_common + "/login.defs.old", op_path + "/login.defs") - MockConfigDb.set_config_db(test_data[config_name]) - 
host_config_daemon = hostcfgd.HostConfigDaemon() - - try: - passwh_table = host_config_daemon.config_db.get_table('PASSW_HARDENING') - except Exception as e: - syslog.syslog(syslog.LOG_ERR, "failed: get_table 'PASSW_HARDENING', exception={}".format(e)) - passwh_table = [] - - host_config_daemon.passwcfg.load(passwh_table) - - - diff_output = "" - files_to_compare = ['common-password'] - - # check output files exists - for name in files_to_compare: - if not os.path.isfile(sop_path + "/" + name): - raise ValueError('filename: %s not exit' % (sop_path + "/" + name)) - if not os.path.isfile(op_path + "/" + name): - raise ValueError('filename: %s not exit' % (op_path + "/" + name)) - - # deep comparison - match, mismatch, errors = filecmp.cmpfiles(sop_path, op_path, files_to_compare, shallow=False) - - if not match: - for name in files_to_compare: - diff_output += self.run_diff( sop_path + "/" + name,\ - op_path + "/" + name).decode('utf-8') - - self.assertTrue(len(diff_output) == 0, diff_output) - - # compare age data in login.def file. - out_passw_age_days = self.get_passw_days(op_path + "/login.defs", 'MAX_DAYS') - sout_passw_age_days = self.get_passw_days(sop_path + "/login.defs", 'MAX_DAYS') - out_passw_age_warn_days = self.get_passw_days(op_path + "/login.defs", 'WARN_DAYS') - sout_passw_age_warn_days = self.get_passw_days(sop_path + "/login.defs", 'WARN_DAYS') - - self.assertEqual(out_passw_age_days, sout_passw_age_days) - self.assertEqual(out_passw_age_warn_days, sout_passw_age_warn_days) - - @parameterized.expand(HOSTCFGD_TEST_PASSWH_VECTOR) - def test_hostcfgd_passwh(self, test_name, test_data): - """ - Test PASSWH hostcfd daemon initialization - - Args: - test_name(str): test name - test_data(dict): test data which contains initial Config Db tables, and expected results - - Returns: - None - """ - - self.check_config(test_name, test_data, "default_values") - - @parameterized.expand(HOSTCFGD_TEST_PASSWH_VECTOR) - def test_hostcfgd_passwh_enable(self, test_name, test_data): - """ - Test PASSWH hostcfd daemon initialization - - Args: - test_name(str): test name - test_data(dict): test data which contains initial Config Db tables, and expected results - - Returns: - None - """ - - self.check_config(test_name, test_data, "enable_feature") - - - @parameterized.expand(HOSTCFGD_TEST_PASSWH_VECTOR) - def test_hostcfgd_passwh_classes(self, test_name, test_data): - """ - Test PASSWH hostcfd daemon initialization - - Args: - test_name(str): test name - test_data(dict): test data which contains initial Config Db tables, and expected results - - Returns: - None - """ - - self.check_config(test_name, test_data, "enable_digits_class") \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/hostcfgd_radius_test.py b/src/sonic-host-services/tests/hostcfgd/hostcfgd_radius_test.py deleted file mode 100644 index c08cd1829a..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/hostcfgd_radius_test.py +++ /dev/null @@ -1,103 +0,0 @@ -import importlib.machinery -import importlib.util -import filecmp -import shutil -import os -import sys -import subprocess -from swsscommon import swsscommon - -from parameterized import parameterized -from unittest import TestCase, mock -from tests.hostcfgd.test_radius_vectors import HOSTCFGD_TEST_RADIUS_VECTOR -from tests.common.mock_configdb import MockConfigDb, MockDBConnector - - -test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -modules_path = os.path.dirname(test_path) -scripts_path = os.path.join(modules_path, 
"scripts") -src_path = os.path.dirname(modules_path) -templates_path = os.path.join(src_path, "sonic-host-services-data/templates") -output_path = os.path.join(test_path, "hostcfgd/output") -sample_output_path = os.path.join(test_path, "hostcfgd/sample_output") -sys.path.insert(0, modules_path) - -# Load the file under test -hostcfgd_path = os.path.join(scripts_path, 'hostcfgd') -loader = importlib.machinery.SourceFileLoader('hostcfgd', hostcfgd_path) -spec = importlib.util.spec_from_loader(loader.name, loader) -hostcfgd = importlib.util.module_from_spec(spec) -loader.exec_module(hostcfgd) -sys.modules['hostcfgd'] = hostcfgd - -# Mock swsscommon classes -hostcfgd.ConfigDBConnector = MockConfigDb -hostcfgd.DBConnector = MockDBConnector -hostcfgd.Table = mock.Mock() - -class TestHostcfgdRADIUS(TestCase): - """ - Test hostcfd daemon - RADIUS - """ - def run_diff(self, file1, file2): - return subprocess.check_output('diff -uR {} {} || true'.format(file1, file2), shell=True) - - - @parameterized.expand(HOSTCFGD_TEST_RADIUS_VECTOR) - def test_hostcfgd_radius(self, test_name, test_data): - """ - Test RADIUS hostcfd daemon initialization - Args: - test_name(str): test name - test_data(dict): test data which contains initial Config Db tables, and expected results - Returns: - None - """ - - t_path = templates_path - op_path = output_path + "/" + test_name - sop_path = sample_output_path + "/" + test_name - - hostcfgd.PAM_AUTH_CONF_TEMPLATE = t_path + "/common-auth-sonic.j2" - hostcfgd.NSS_TACPLUS_CONF_TEMPLATE = t_path + "/tacplus_nss.conf.j2" - hostcfgd.NSS_RADIUS_CONF_TEMPLATE = t_path + "/radius_nss.conf.j2" - hostcfgd.PAM_RADIUS_AUTH_CONF_TEMPLATE = t_path + "/pam_radius_auth.conf.j2" - hostcfgd.PAM_AUTH_CONF = op_path + "/common-auth-sonic" - hostcfgd.NSS_TACPLUS_CONF = op_path + "/tacplus_nss.conf" - hostcfgd.NSS_RADIUS_CONF = op_path + "/radius_nss.conf" - hostcfgd.NSS_CONF = op_path + "/nsswitch.conf" - hostcfgd.ETC_PAMD_SSHD = op_path + "/sshd" - hostcfgd.ETC_PAMD_LOGIN = op_path + "/login" - hostcfgd.RADIUS_PAM_AUTH_CONF_DIR = op_path + "/" - - shutil.rmtree( op_path, ignore_errors=True) - os.mkdir( op_path) - - shutil.copyfile( sop_path + "/sshd.old", op_path + "/sshd") - shutil.copyfile( sop_path + "/login.old", op_path + "/login") - - MockConfigDb.set_config_db(test_data["config_db"]) - host_config_daemon = hostcfgd.HostConfigDaemon() - - aaa = host_config_daemon.config_db.get_table('AAA') - - try: - radius_global = host_config_daemon.config_db.get_table('RADIUS') - except: - radius_global = [] - try: - radius_server = \ - host_config_daemon.config_db.get_table('RADIUS_SERVER') - except: - radius_server = [] - - host_config_daemon.aaacfg.load(aaa,[],[],radius_global,radius_server) - dcmp = filecmp.dircmp(sop_path, op_path) - diff_output = "" - for name in dcmp.diff_files: - diff_output += \ - "Diff: file: {} expected: {} output: {}\n".format(\ - name, dcmp.left, dcmp.right) - diff_output += self.run_diff( dcmp.left + "/" + name,\ - dcmp.right + "/" + name) - self.assertTrue(len(diff_output) == 0, diff_output) diff --git a/src/sonic-host-services/tests/hostcfgd/hostcfgd_tacacs_test.py b/src/sonic-host-services/tests/hostcfgd/hostcfgd_tacacs_test.py deleted file mode 100644 index a6478c08dc..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/hostcfgd_tacacs_test.py +++ /dev/null @@ -1,116 +0,0 @@ -import importlib.machinery -import importlib.util -import filecmp -import shutil -import os -import sys -import subprocess -from swsscommon import swsscommon - -from parameterized import 
parameterized -from unittest import TestCase, mock -from tests.hostcfgd.test_tacacs_vectors import HOSTCFGD_TEST_TACACS_VECTOR -from tests.common.mock_configdb import MockConfigDb, MockDBConnector - -test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -modules_path = os.path.dirname(test_path) -scripts_path = os.path.join(modules_path, "scripts") -src_path = os.path.dirname(modules_path) -templates_path = os.path.join(src_path, "sonic-host-services-data/templates") -output_path = os.path.join(test_path, "hostcfgd/output") -sample_output_path = os.path.join(test_path, "hostcfgd/sample_output") -sys.path.insert(0, modules_path) - -# Load the file under test -hostcfgd_path = os.path.join(scripts_path, 'hostcfgd') -loader = importlib.machinery.SourceFileLoader('hostcfgd', hostcfgd_path) -spec = importlib.util.spec_from_loader(loader.name, loader) -hostcfgd = importlib.util.module_from_spec(spec) -loader.exec_module(hostcfgd) -sys.modules['hostcfgd'] = hostcfgd - -# Mock swsscommon classes -hostcfgd.ConfigDBConnector = MockConfigDb -hostcfgd.DBConnector = MockDBConnector -hostcfgd.Table = mock.Mock() - -class TestHostcfgdTACACS(TestCase): - """ - Test hostcfd daemon - TACACS - """ - def run_diff(self, file1, file2): - return subprocess.check_output('diff -uR {} {} || true'.format(file1, file2), shell=True) - - """ - Check different config - """ - def check_config(self, test_name, test_data, config_name): - t_path = templates_path - op_path = output_path + "/" + test_name + "_" + config_name - sop_path = sample_output_path + "/" + test_name + "_" + config_name - - hostcfgd.PAM_AUTH_CONF_TEMPLATE = t_path + "/common-auth-sonic.j2" - hostcfgd.NSS_TACPLUS_CONF_TEMPLATE = t_path + "/tacplus_nss.conf.j2" - hostcfgd.NSS_RADIUS_CONF_TEMPLATE = t_path + "/radius_nss.conf.j2" - hostcfgd.PAM_RADIUS_AUTH_CONF_TEMPLATE = t_path + "/pam_radius_auth.conf.j2" - hostcfgd.PAM_AUTH_CONF = op_path + "/common-auth-sonic" - hostcfgd.NSS_TACPLUS_CONF = op_path + "/tacplus_nss.conf" - hostcfgd.NSS_RADIUS_CONF = op_path + "/radius_nss.conf" - hostcfgd.NSS_CONF = op_path + "/nsswitch.conf" - hostcfgd.ETC_PAMD_SSHD = op_path + "/sshd" - hostcfgd.ETC_PAMD_LOGIN = op_path + "/login" - hostcfgd.RADIUS_PAM_AUTH_CONF_DIR = op_path + "/" - - shutil.rmtree( op_path, ignore_errors=True) - os.mkdir( op_path) - - shutil.copyfile( sop_path + "/sshd.old", op_path + "/sshd") - shutil.copyfile( sop_path + "/login.old", op_path + "/login") - - MockConfigDb.set_config_db(test_data[config_name]) - host_config_daemon = hostcfgd.HostConfigDaemon() - - aaa = host_config_daemon.config_db.get_table('AAA') - - try: - tacacs_global = host_config_daemon.config_db.get_table('TACPLUS') - except: - tacacs_global = [] - try: - tacacs_server = \ - host_config_daemon.config_db.get_table('TACPLUS_SERVER') - except: - tacacs_server = [] - - host_config_daemon.aaacfg.load(aaa,tacacs_global,tacacs_server,[],[]) - dcmp = filecmp.dircmp(sop_path, op_path) - diff_output = "" - for name in dcmp.diff_files: - diff_output += \ - "Diff: file: {} expected: {} output: {}\n".format(\ - name, dcmp.left, dcmp.right) - diff_output += self.run_diff( dcmp.left + "/" + name,\ - dcmp.right + "/" + name) - self.assertTrue(len(diff_output) == 0, diff_output) - - - @parameterized.expand(HOSTCFGD_TEST_TACACS_VECTOR) - def test_hostcfgd_tacacs(self, test_name, test_data): - """ - Test TACACS hostcfd daemon initialization - - Args: - test_name(str): test name - test_data(dict): test data which contains initial Config Db tables, and expected results - - 
Returns: - None - """ - # test local config - self.check_config(test_name, test_data, "config_db_local") - # test remote config - self.check_config(test_name, test_data, "config_db_tacacs") - # test local + tacacs config - self.check_config(test_name, test_data, "config_db_local_and_tacacs") - # test disable accounting - self.check_config(test_name, test_data, "config_db_disable_accounting") diff --git a/src/sonic-host-services/tests/hostcfgd/hostcfgd_test.py b/src/sonic-host-services/tests/hostcfgd/hostcfgd_test.py deleted file mode 100644 index 786bd1c8f2..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/hostcfgd_test.py +++ /dev/null @@ -1,359 +0,0 @@ -import os -import sys -import swsscommon as swsscommon_package -from swsscommon import swsscommon - -from parameterized import parameterized -from sonic_py_common.general import load_module_from_source -from unittest import TestCase, mock - -from .test_vectors import HOSTCFGD_TEST_VECTOR, HOSTCFG_DAEMON_CFG_DB -from tests.common.mock_configdb import MockConfigDb, MockDBConnector - -from pyfakefs.fake_filesystem_unittest import patchfs -from deepdiff import DeepDiff -from unittest.mock import call - -test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -modules_path = os.path.dirname(test_path) -scripts_path = os.path.join(modules_path, 'scripts') -sys.path.insert(0, modules_path) - -# Load the file under test -hostcfgd_path = os.path.join(scripts_path, 'hostcfgd') -hostcfgd = load_module_from_source('hostcfgd', hostcfgd_path) -hostcfgd.ConfigDBConnector = MockConfigDb -hostcfgd.DBConnector = MockDBConnector -hostcfgd.Table = mock.Mock() - - -class TestFeatureHandler(TestCase): - """Test methods of `FeatureHandler` class. - """ - def checks_config_table(self, feature_table, expected_table): - """Compares `FEATURE` table in `CONFIG_DB` with expected output table. - - Args: - feature_table: A dictionary indicates current `FEATURE` table in `CONFIG_DB`. - expected_table A dictionary indicates the expected `FEATURE` table in `CONFIG_DB`. - - Returns: - Returns True if `FEATURE` table in `CONFIG_DB` was not modified unexpectedly; - otherwise, returns False. - """ - ddiff = DeepDiff(feature_table, expected_table, ignore_order=True) - - return True if not ddiff else False - - def checks_systemd_config_file(self, feature_table): - """Checks whether the systemd configuration file of each feature was created or not - and whether the `Restart=` field in the file is set correctly or not. - - Args: - feature_table: A dictionary indicates `Feature` table in `CONFIG_DB`. - - Returns: Boolean value indicates whether test passed or not. 
- """ - - truth_table = {'enabled': 'always', - 'disabled': 'no'} - - systemd_config_file_path = os.path.join(hostcfgd.FeatureHandler.SYSTEMD_SERVICE_CONF_DIR, - 'auto_restart.conf') - - for feature_name in feature_table: - auto_restart_status = feature_table[feature_name].get('auto_restart', 'disabled') - if "enabled" in auto_restart_status: - auto_restart_status = "enabled" - elif "disabled" in auto_restart_status: - auto_restart_status = "disabled" - - feature_systemd_config_file_path = systemd_config_file_path.format(feature_name) - is_config_file_existing = os.path.exists(feature_systemd_config_file_path) - assert is_config_file_existing, "Systemd configuration file of feature '{}' does not exist!".format(feature_name) - - with open(feature_systemd_config_file_path) as systemd_config_file: - status = systemd_config_file.read().strip() - assert status == '[Service]\nRestart={}'.format(truth_table[auto_restart_status]) - - def get_state_db_set_calls(self, feature_table): - """Returns a Mock call objects which recorded the `set` calls to `FEATURE` table in `STATE_DB`. - - Args: - feature_table: A dictionary indicates `FEATURE` table in `CONFIG_DB`. - - Returns: - set_call_list: A list indicates Mock call objects. - """ - set_call_list = [] - - for feature_name in feature_table.keys(): - feature_state = "" - if "enabled" in feature_table[feature_name]["state"]: - feature_state = "enabled" - elif "disabled" in feature_table[feature_name]["state"]: - feature_state = "disabled" - else: - feature_state = feature_table[feature_name]["state"] - - set_call_list.append(mock.call(feature_name, [("state", feature_state)])) - - return set_call_list - - @parameterized.expand(HOSTCFGD_TEST_VECTOR) - @patchfs - def test_sync_state_field(self, test_scenario_name, config_data, fs): - """Tests the method `sync_state_field(...)` of `FeatureHandler` class. - - Args: - test_secnario_name: A string indicates different testing scenario. - config_data: A dictionary contains initial `CONFIG_DB` tables and expected results. - - Returns: - Boolean value indicates whether test will pass or not. - """ - # add real path of sesscommon for database_config.json - fs.add_real_paths(swsscommon_package.__path__) - fs.create_dir(hostcfgd.FeatureHandler.SYSTEMD_SYSTEM_DIR) - - MockConfigDb.set_config_db(config_data['config_db']) - feature_state_table_mock = mock.Mock() - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = config_data['popen_attributes'] - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = popen_mock - - device_config = {} - device_config['DEVICE_METADATA'] = MockConfigDb.CONFIG_DB['DEVICE_METADATA'] - feature_handler = hostcfgd.FeatureHandler(MockConfigDb(), feature_state_table_mock, device_config) - - feature_table = MockConfigDb.CONFIG_DB['FEATURE'] - feature_handler.sync_state_field(feature_table) - - is_any_difference = self.checks_config_table(MockConfigDb.get_config_db()['FEATURE'], - config_data['expected_config_db']['FEATURE']) - assert is_any_difference, "'FEATURE' table in 'CONFIG_DB' is modified unexpectedly!" 
- - feature_table_state_db_calls = self.get_state_db_set_calls(feature_table) - - self.checks_systemd_config_file(config_data['config_db']['FEATURE']) - mocked_subprocess.check_call.assert_has_calls(config_data['enable_feature_subprocess_calls'], - any_order=True) - mocked_subprocess.check_call.assert_has_calls(config_data['daemon_reload_subprocess_call'], - any_order=True) - feature_state_table_mock.set.assert_has_calls(feature_table_state_db_calls) - self.checks_systemd_config_file(config_data['config_db']['FEATURE']) - - @parameterized.expand(HOSTCFGD_TEST_VECTOR) - @patchfs - def test_handler(self, test_scenario_name, config_data, fs): - """Tests the method `handle(...)` of `FeatureHandler` class. - - Args: - test_secnario_name: A string indicates different testing scenario. - config_data: A dictionary contains initial `CONFIG_DB` tables and expected results. - - Returns: - Boolean value indicates whether test will pass or not. - """ - # add real path of sesscommon for database_config.json - fs.add_real_paths(swsscommon_package.__path__) - fs.create_dir(hostcfgd.FeatureHandler.SYSTEMD_SYSTEM_DIR) - - MockConfigDb.set_config_db(config_data['config_db']) - feature_state_table_mock = mock.Mock() - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = config_data['popen_attributes'] - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = popen_mock - - device_config = {} - device_config['DEVICE_METADATA'] = MockConfigDb.CONFIG_DB['DEVICE_METADATA'] - feature_handler = hostcfgd.FeatureHandler(MockConfigDb(), feature_state_table_mock, device_config) - - feature_table = MockConfigDb.CONFIG_DB['FEATURE'] - - for feature_name, feature_config in feature_table.items(): - feature_handler.handler(feature_name, 'SET', feature_config) - - self.checks_systemd_config_file(config_data['config_db']['FEATURE']) - mocked_subprocess.check_call.assert_has_calls(config_data['enable_feature_subprocess_calls'], - any_order=True) - mocked_subprocess.check_call.assert_has_calls(config_data['daemon_reload_subprocess_call'], - any_order=True) - - def test_feature_config_parsing(self): - swss_feature = hostcfgd.Feature('swss', { - 'state': 'enabled', - 'auto_restart': 'enabled', - 'has_timer': 'True', - 'has_global_scope': 'False', - 'has_per_asic_scope': 'True', - }) - - assert swss_feature.name == 'swss' - assert swss_feature.state == 'enabled' - assert swss_feature.auto_restart == 'enabled' - assert swss_feature.has_timer - assert not swss_feature.has_global_scope - assert swss_feature.has_per_asic_scope - - def test_feature_config_parsing_defaults(self): - swss_feature = hostcfgd.Feature('swss', { - 'state': 'enabled', - }) - - assert swss_feature.name == 'swss' - assert swss_feature.state == 'enabled' - assert swss_feature.auto_restart == 'disabled' - assert not swss_feature.has_timer - assert swss_feature.has_global_scope - assert not swss_feature.has_per_asic_scope - - -class TesNtpCfgd(TestCase): - """ - Test hostcfd daemon - NtpCfgd - """ - def setUp(self): - MockConfigDb.CONFIG_DB['NTP'] = {'global': {'vrf': 'mgmt', 'src_intf': 'eth0'}} - MockConfigDb.CONFIG_DB['NTP_SERVER'] = {'0.debian.pool.ntp.org': {}} - - def tearDown(self): - MockConfigDb.CONFIG_DB = {} - - def test_ntp_global_update_with_no_servers(self): - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = {'communicate.return_value': ('output', 'error')} - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = 
popen_mock - - ntpcfgd = hostcfgd.NtpCfg() - ntpcfgd.ntp_global_update('global', MockConfigDb.CONFIG_DB['NTP']['global']) - - mocked_subprocess.check_call.assert_not_called() - - def test_ntp_global_update_ntp_servers(self): - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = {'communicate.return_value': ('output', 'error')} - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = popen_mock - - ntpcfgd = hostcfgd.NtpCfg() - ntpcfgd.ntp_global_update('global', MockConfigDb.CONFIG_DB['NTP']['global']) - ntpcfgd.ntp_server_update('0.debian.pool.ntp.org', 'SET') - mocked_subprocess.check_call.assert_has_calls([call('systemctl restart ntp-config', shell=True)]) - - def test_loopback_update(self): - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = {'communicate.return_value': ('output', 'error')} - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = popen_mock - - ntpcfgd = hostcfgd.NtpCfg() - ntpcfgd.ntp_global = MockConfigDb.CONFIG_DB['NTP']['global'] - ntpcfgd.ntp_servers.add('0.debian.pool.ntp.org') - - ntpcfgd.handle_ntp_source_intf_chg('eth0') - mocked_subprocess.check_call.assert_has_calls([call('systemctl restart ntp-config', shell=True)]) - - -class TestHostcfgdDaemon(TestCase): - - def setUp(self): - MockConfigDb.set_config_db(HOSTCFG_DAEMON_CFG_DB) - - def tearDown(self): - MockConfigDb.CONFIG_DB = {} - - @patchfs - def test_feature_events(self, fs): - fs.create_dir(hostcfgd.FeatureHandler.SYSTEMD_SYSTEM_DIR) - MockConfigDb.event_queue = [('FEATURE', 'dhcp_relay'), - ('FEATURE', 'mux'), - ('FEATURE', 'telemetry')] - daemon = hostcfgd.HostConfigDaemon() - daemon.register_callbacks() - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = {'communicate.return_value': ('output', 'error')} - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = popen_mock - try: - daemon.start() - except TimeoutError: - pass - expected = [call('sudo systemctl daemon-reload', shell=True), - call('sudo systemctl unmask dhcp_relay.service', shell=True), - call('sudo systemctl enable dhcp_relay.service', shell=True), - call('sudo systemctl start dhcp_relay.service', shell=True), - call('sudo systemctl daemon-reload', shell=True), - call('sudo systemctl unmask mux.service', shell=True), - call('sudo systemctl enable mux.service', shell=True), - call('sudo systemctl start mux.service', shell=True), - call('sudo systemctl daemon-reload', shell=True), - call('sudo systemctl unmask telemetry.service', shell=True), - call('sudo systemctl unmask telemetry.timer', shell=True), - call('sudo systemctl enable telemetry.timer', shell=True), - call('sudo systemctl start telemetry.timer', shell=True)] - mocked_subprocess.check_call.assert_has_calls(expected) - - # Change the state to disabled - MockConfigDb.CONFIG_DB['FEATURE']['telemetry']['state'] = 'disabled' - MockConfigDb.event_queue = [('FEATURE', 'telemetry')] - try: - daemon.start() - except TimeoutError: - pass - expected = [call('sudo systemctl stop telemetry.timer', shell=True), - call('sudo systemctl disable telemetry.timer', shell=True), - call('sudo systemctl mask telemetry.timer', shell=True), - call('sudo systemctl stop telemetry.service', shell=True), - call('sudo systemctl disable telemetry.timer', shell=True), - call('sudo systemctl mask telemetry.timer', shell=True)] - mocked_subprocess.check_call.assert_has_calls(expected) - - def 
test_loopback_events(self): - MockConfigDb.set_config_db(HOSTCFG_DAEMON_CFG_DB) - MockConfigDb.event_queue = [('NTP', 'global'), - ('NTP_SERVER', '0.debian.pool.ntp.org'), - ('LOOPBACK_INTERFACE', 'Loopback0|10.184.8.233/32')] - daemon = hostcfgd.HostConfigDaemon() - daemon.register_callbacks() - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = {'communicate.return_value': ('output', 'error')} - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = popen_mock - try: - daemon.start() - except TimeoutError: - pass - expected = [call('systemctl restart ntp-config', shell=True), - call('iptables -t mangle --append PREROUTING -p tcp --tcp-flags SYN SYN -d 10.184.8.233 -j TCPMSS --set-mss 1460', shell=True), - call('iptables -t mangle --append POSTROUTING -p tcp --tcp-flags SYN SYN -s 10.184.8.233 -j TCPMSS --set-mss 1460', shell=True)] - mocked_subprocess.check_call.assert_has_calls(expected, any_order=True) - - def test_kdump_event(self): - MockConfigDb.set_config_db(HOSTCFG_DAEMON_CFG_DB) - daemon = hostcfgd.HostConfigDaemon() - daemon.register_callbacks() - MockConfigDb.event_queue = [('KDUMP', 'config')] - with mock.patch('hostcfgd.subprocess') as mocked_subprocess: - popen_mock = mock.Mock() - attrs = {'communicate.return_value': ('output', 'error')} - popen_mock.configure_mock(**attrs) - mocked_subprocess.Popen.return_value = popen_mock - try: - daemon.start() - except TimeoutError: - pass - expected = [call('sonic-kdump-config --disable', shell=True), - call('sonic-kdump-config --num_dumps 3', shell=True), - call('sonic-kdump-config --memory 0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M', shell=True)] - mocked_subprocess.check_call.assert_has_calls(expected, any_order=True) diff --git a/src/sonic-host-services/tests/hostcfgd/output/.gitignore b/src/sonic-host-services/tests/hostcfgd/output/.gitignore deleted file mode 100644 index 3f4e276cb2..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/output/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# Ignore all test generated files -* -# But keep this file -!.gitignore diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/common-auth-sonic b/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/common-auth-sonic deleted file mode 100644 index 87af4cc5c6..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/common-auth-sonic +++ /dev/null @@ -1,21 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-auth- authentication settings common to all services -# This file is included from other service-specific PAM config files, -# and should contain a list of the authentication modules that define -# the central authentication scheme for use on the system -# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the -# traditional Unix authentication mechanisms. 
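Returning to the daemon tests above: test_loopback_events expects two iptables mangle rules that clamp TCP MSS to 1460 for traffic addressed to or sourced from the loopback address. The sketch below rebuilds those command strings; the function name and the parameterized MSS value are assumptions, not hostcfgd's actual helper.

def loopback_tcpmss_rules(loopback_prefix, mss=1460):
    # '10.184.8.233/32' -> '10.184.8.233'
    ip = loopback_prefix.split('/')[0]
    return [
        'iptables -t mangle --append PREROUTING -p tcp --tcp-flags SYN SYN '
        '-d {} -j TCPMSS --set-mss {}'.format(ip, mss),
        'iptables -t mangle --append POSTROUTING -p tcp --tcp-flags SYN SYN '
        '-s {} -j TCPMSS --set-mss {}'.format(ip, mss),
    ]

# loopback_tcpmss_rules('10.184.8.233/32') reproduces the two commands asserted above.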
-# -# here are the per-package modules (the "Primary" block) - -auth [success=1 default=ignore] pam_unix.so nullok try_first_pass - -# -# here's the fallback if no module succeeds -auth requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -auth required pam_permit.so -# and here are more per-package modules (the "Additional" block) diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/login b/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/login deleted file mode 100644 index 80ba645281..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/login +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. 
-# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth-sonic - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. -# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/login.old b/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/login.old deleted file mode 100644 index 07ff95407c..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/login.old +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. 
-# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. -# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. 
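Comparing the login sample above, which pulls in common-auth-sonic, with this login.old sample, which pulls in the stock common-auth, the include line is the only difference, which suggests it is the single line the AAA handling rewrites. A hypothetical helper performing just that swap (the function name is an assumption):

def set_login_auth_include(pam_text, use_sonic_stack):
    # Swap between the stock and the SONiC-managed common-auth include.
    target = '@include common-auth-sonic' if use_sonic_stack else '@include common-auth'
    out = []
    for line in pam_text.splitlines():
        if line.strip() in ('@include common-auth', '@include common-auth-sonic'):
            line = target
        out.append(line)
    return '\n'.join(out)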
-session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/radius_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/radius_nss.conf deleted file mode 100644 index 8c31db9fba..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/radius_nss.conf +++ /dev/null @@ -1,56 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2 -# RADIUS NSS Configuration File -# -# Debug: on|off|trace -# Default: off -# -# debug=on -debug=on - -# -# User Privilege: -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell - -# Eg: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell -# - -# many_to_one: -# y: Map RADIUS users to one local user per privilege. -# n: Create local user account on first successful authentication. -# Default: n -# - -# Eg: -# many_to_one=y -# - -# unconfirmed_disallow: -# y: Do not allow unconfirmed users (users created before authentication) -# n: Allow unconfirmed users. -# Default: n - -# Eg: -# unconfirmed_disallow=y -# - -# unconfirmed_ageout: -# : Wait time before purging unconfirmed users -# Default: 600 -# - -# Eg: -# unconfirmed_ageout=900 -# - -# unconfirmed_regexp: -# : The RE to match the command line of processes for which the -# creation of unconfirmed users are to be allowed. -# Default: (.*: \[priv\])|(.*: \[accepted\]) -# where: is the unconfirmed user. -# \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/sshd b/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/sshd deleted file mode 100644 index c025af353d..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/sshd +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth-sonic - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/sshd.old b/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/sshd.old deleted file mode 100644 index d70b384bd9..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/sshd.old +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. 
-session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/tacplus_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/tacplus_nss.conf deleted file mode 100644 index eac828491a..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/LOCAL/tacplus_nss.conf +++ /dev/null @@ -1,40 +0,0 @@ -# Configuration for libnss-tacplus - -# debug - If you want to open debug log, set it on -# Default: off -# debug=on -debug=on - -# local_accounting - If you want to local accounting, set it -# Default: None -# local_accounting - -# tacacs_accounting - If you want to tacacs+ accounting, set it -# Default: None -# tacacs_accounting - -# local_authorization - If you want to local authorization, set it -# Default: None -# local_authorization -local_authorization - -# tacacs_authorization - If you want to tacacs+ authorization, set it -# Default: None -# tacacs_authorization - -# src_ip - set source address of TACACS+ protocol packets -# Default: None (auto source ip address) -# src_ip=2.2.2.2 - -# server - set ip address, tcp port, secret string and timeout for TACACS+ servers -# Default: None (no TACACS+ server) -# server=1.1.1.1:49,secret=test,timeout=3 - -# user_priv - set the map between TACACS+ user privilege and local user's passwd -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash - -# many_to_one - create one local user for many TACACS+ users which has the same privilege -# Default: many_to_one=n -# many_to_one=y diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING/login.defs.old b/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING/login.defs.old deleted file mode 100644 index db8baa4d2b..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING/login.defs.old +++ /dev/null @@ -1,340 +0,0 @@ -# -# /etc/login.defs - Configuration control definitions for the login package. -# -# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. -# If unspecified, some arbitrary (and possibly incorrect) value will -# be assumed. All other items are optional - if not specified then -# the described action or option will be inhibited. -# -# Comment lines (lines beginning with "#") and blank lines are ignored. -# -# Modified for Linux. --marekm - -# REQUIRED for useradd/userdel/usermod -# Directory where mailboxes reside, _or_ name of file, relative to the -# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, -# MAIL_DIR takes precedence. -# -# Essentially: -# - MAIL_DIR defines the location of users mail spool files -# (for mbox use) by appending the username to MAIL_DIR as defined -# below. 
-# - MAIL_FILE defines the location of the users mail spool files as the -# fully-qualified filename obtained by prepending the user home -# directory before $MAIL_FILE -# -# NOTE: This is no more used for setting up users MAIL environment variable -# which is, starting from shadow 4.0.12-1 in Debian, entirely the -# job of the pam_mail PAM modules -# See default PAM configuration files provided for -# login, su, etc. -# -# This is a temporary situation: setting these variables will soon -# move to /etc/default/useradd and the variables will then be -# no more supported -MAIL_DIR /var/mail -#MAIL_FILE .mail - -# -# Enable logging and display of /var/log/faillog login failure info. -# This option conflicts with the pam_tally PAM module. -# -FAILLOG_ENAB yes - -# -# Enable display of unknown usernames when login failures are recorded. -# -# WARNING: Unknown usernames may become world readable. -# See #290803 and #298773 for details about how this could become a security -# concern -LOG_UNKFAIL_ENAB no - -# -# Enable logging of successful logins -# -LOG_OK_LOGINS no - -# -# Enable "syslog" logging of su activity - in addition to sulog file logging. -# SYSLOG_SG_ENAB does the same for newgrp and sg. -# -SYSLOG_SU_ENAB yes -SYSLOG_SG_ENAB yes - -# -# If defined, all su activity is logged to this file. -# -#SULOG_FILE /var/log/sulog - -# -# If defined, file which maps tty line to TERM environment parameter. -# Each line of the file is in a format something like "vt100 tty01". -# -#TTYTYPE_FILE /etc/ttytype - -# -# If defined, login failures will be logged here in a utmp format -# last, when invoked as lastb, will read /var/log/btmp, so... -# -FTMP_FILE /var/log/btmp - -# -# If defined, the command name to display when running "su -". For -# example, if this is defined as "su" then a "ps" will display the -# command is "-su". If not defined, then "ps" would display the -# name of the shell actually being run, e.g. something like "-sh". -# -SU_NAME su - -# -# If defined, file which inhibits all the usual chatter during the login -# sequence. If a full pathname, then hushed mode will be enabled if the -# user's name or shell are found in the file. If not a full pathname, then -# hushed mode will be enabled if the file exists in the user's home directory. -# -HUSHLOGIN_FILE .hushlogin -#HUSHLOGIN_FILE /etc/hushlogins - -# -# *REQUIRED* The default PATH settings, for superuser and normal users. -# -# (they are minimal, add the rest in the shell startup files) -ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games - -# -# Terminal permissions -# -# TTYGROUP Login tty will be assigned this group ownership. -# TTYPERM Login tty will be set to this permission. -# -# If you have a "write" program which is "setgid" to a special group -# which owns the terminals, define TTYGROUP to the group number and -# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign -# TTYPERM to either 622 or 600. -# -# In Debian /usr/bin/bsd-write or similar programs are setgid tty -# However, the default and recommended value for TTYPERM is still 0600 -# to not allow anyone to write to anyone else console or terminal - -# Users can still allow other people to write them by issuing -# the "mesg y" command. - -TTYGROUP tty -TTYPERM 0600 - -# -# Login configuration initializations: -# -# ERASECHAR Terminal ERASE character ('\010' = backspace). -# KILLCHAR Terminal KILL character ('\025' = CTRL/U). 
-# UMASK Default "umask" value. -# -# The ERASECHAR and KILLCHAR are used only on System V machines. -# -# UMASK is the default umask value for pam_umask and is used by -# useradd and newusers to set the mode of the new home directories. -# 022 is the "historical" value in Debian for UMASK -# 027, or even 077, could be considered better for privacy -# There is no One True Answer here : each sysadmin must make up his/her -# mind. -# -# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value -# for private user groups, i. e. the uid is the same as gid, and username is -# the same as the primary group name: for these, the user permissions will be -# used as group permissions, e. g. 022 will become 002. -# -# Prefix these values with "0" to get octal, "0x" to get hexadecimal. -# -ERASECHAR 0177 -KILLCHAR 025 -UMASK 022 - -# -# Password aging controls: -# -# PASS_MAX_DAYS Maximum number of days a password may be used. -# PASS_MIN_DAYS Minimum number of days allowed between password changes. -# PASS_WARN_AGE Number of days warning given before a password expires. -# -PASS_MAX_DAYS 99999 -PASS_MIN_DAYS 0 -PASS_WARN_AGE 7 - -# -# Min/max values for automatic uid selection in useradd -# -UID_MIN 1000 -UID_MAX 60000 -# System accounts -#SYS_UID_MIN 100 -#SYS_UID_MAX 999 - -# -# Min/max values for automatic gid selection in groupadd -# -GID_MIN 1000 -GID_MAX 60000 -# System accounts -#SYS_GID_MIN 100 -#SYS_GID_MAX 999 - -# -# Max number of login retries if password is bad. This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built -# in of 3 retries. However, this is a safe fallback in case you are using -# an authentication module that does not enforce PAM_MAXTRIES. -# -LOGIN_RETRIES 5 - -# -# Max time in seconds for login -# -LOGIN_TIMEOUT 60 - -# -# Which fields may be changed by regular users using chfn - use -# any combination of letters "frwh" (full name, room number, work -# phone, home phone). If not defined, no changes are allowed. -# For backward compatibility, "yes" = "rwh" and "no" = "frwh". -# -CHFN_RESTRICT rwh - -# -# Should login be allowed if we can't cd to the home directory? -# Default in no. -# -DEFAULT_HOME yes - -# -# If defined, this command is run when removing a user. -# It should remove any at/cron/print jobs etc. owned by -# the user to be removed (passed as the first argument). -# -#USERDEL_CMD /usr/sbin/userdel_local - -# -# If set to yes, userdel will remove the user's group if it contains no -# more members, and useradd will create by default a group with the name -# of the user. -# -# Other former uses of this variable such as setting the umask when -# user==primary group are not used in PAM environments, such as Debian -# -USERGROUPS_ENAB yes - -# -# Instead of the real user shell, the program specified by this parameter -# will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, -# banner, ...) before running the actual shell. -# -# FAKE_SHELL /bin/fakeshell - -# -# If defined, either full pathname of a file containing device names or -# a ":" delimited list of device names. Root logins will be allowed only -# upon these devices. -# -# This variable is used by login and su. -# -#CONSOLE /etc/consoles -#CONSOLE console:tty01:tty02:tty03:tty04 - -# -# List of groups to add to the user's supplementary group set -# when logging in on the console (as determined by the CONSOLE -# setting). Default is none. 
-# -# Use with caution - it is possible for users to gain permanent -# access to these groups, even when not logged in on the console. -# How to do it is left as an exercise for the reader... -# -# This variable is used by login and su. -# -#CONSOLE_GROUPS floppy:audio:cdrom - -# -# If set to "yes", new passwords will be encrypted using the MD5-based -# algorithm compatible with the one used by recent releases of FreeBSD. -# It supports passwords of unlimited length and longer salt strings. -# Set to "no" if you need to copy encrypted passwords to other systems -# which don't understand the new algorithm. Default is "no". -# -# This variable is deprecated. You should use ENCRYPT_METHOD. -# -#MD5_CRYPT_ENAB no - -# -# If set to MD5 , MD5-based algorithm will be used for encrypting password -# If set to SHA256, SHA256-based algorithm will be used for encrypting password -# If set to SHA512, SHA512-based algorithm will be used for encrypting password -# If set to DES, DES-based algorithm will be used for encrypting password (default) -# Overrides the MD5_CRYPT_ENAB option -# -# Note: It is recommended to use a value consistent with -# the PAM modules configuration. -# -ENCRYPT_METHOD SHA512 - -# -# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. -# -# Define the number of SHA rounds. -# With a lot of rounds, it is more difficult to brute forcing the password. -# But note also that it more CPU resources will be needed to authenticate -# users. -# -# If not specified, the libc will choose the default number of rounds (5000). -# The values must be inside the 1000-999999999 range. -# If only one of the MIN or MAX values is set, then this value will be used. -# If MIN > MAX, the highest value will be used. -# -# SHA_CRYPT_MIN_ROUNDS 5000 -# SHA_CRYPT_MAX_ROUNDS 5000 - -################# OBSOLETED BY PAM ############## -# # -# These options are now handled by PAM. Please # -# edit the appropriate file in /etc/pam.d/ to # -# enable the equivelants of them. -# -############### - -#MOTD_FILE -#DIALUPS_CHECK_ENAB -#LASTLOG_ENAB -#MAIL_CHECK_ENAB -#OBSCURE_CHECKS_ENAB -#PORTTIME_CHECKS_ENAB -#SU_WHEEL_ONLY -#CRACKLIB_DICTPATH -#PASS_CHANGE_TRIES -#PASS_ALWAYS_WARN -#ENVIRON_FILE -#NOLOGINS_FILE -#ISSUE_FILE -#PASS_MIN_LEN -#PASS_MAX_LEN -#ULIMIT -#ENV_HZ -#CHFN_AUTH -#CHSH_AUTH -#FAIL_DELAY - -################# OBSOLETED ####################### -# # -# These options are no more handled by shadow. # -# # -# Shadow utilities will display a warning if they # -# still appear. # -# # -################################################### - -# CLOSE_SESSIONS -# LOGIN_STRING -# NO_PASSWORD_CONSOLE -# QMAIL_DIR - - - diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_default_values/common-password b/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_default_values/common-password deleted file mode 100644 index 0da639249c..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_default_values/common-password +++ /dev/null @@ -1,36 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-password - password-related modules common to all services -# -# This file is included from other service-specific PAM config files, -# and should contain a list of modules that define the services to be -# used to change user passwords. The default is pam_unix. - -# Explanation of pam_unix options: -# The "yescrypt" option enables -#hashed passwords using the yescrypt algorithm, introduced in Debian -#11. 
Without this option, the default is Unix crypt. Prior releases -#used the option "sha512"; if a shadow password hash will be shared -#between Debian 11 and older releases replace "yescrypt" with "sha512" -#for compatibility . The "obscure" option replaces the old -#`OBSCURE_CHECKS_ENAB' option in login.defs. See the pam_unix manpage -#for other options. - -# As of pam 1.0.1-6, this file is managed by pam-auth-update by default. -# To take advantage of this, it is recommended that you configure any -# local modules either before or after the default block, and use -# pam-auth-update to manage selection of other modules. See -# pam-auth-update(8) for details. - -# here are the per-package modules (the "Primary" block) - - -password [success=1 default=ignore] pam_unix.so obscure yescrypt -# here's the fallback if no module succeeds -password requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -password required pam_permit.so -# and here are more per-package modules (the "Additional" block) -# end of pam-auth-update config \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_default_values/login.defs b/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_default_values/login.defs deleted file mode 100644 index db8baa4d2b..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_default_values/login.defs +++ /dev/null @@ -1,340 +0,0 @@ -# -# /etc/login.defs - Configuration control definitions for the login package. -# -# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. -# If unspecified, some arbitrary (and possibly incorrect) value will -# be assumed. All other items are optional - if not specified then -# the described action or option will be inhibited. -# -# Comment lines (lines beginning with "#") and blank lines are ignored. -# -# Modified for Linux. --marekm - -# REQUIRED for useradd/userdel/usermod -# Directory where mailboxes reside, _or_ name of file, relative to the -# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, -# MAIL_DIR takes precedence. -# -# Essentially: -# - MAIL_DIR defines the location of users mail spool files -# (for mbox use) by appending the username to MAIL_DIR as defined -# below. -# - MAIL_FILE defines the location of the users mail spool files as the -# fully-qualified filename obtained by prepending the user home -# directory before $MAIL_FILE -# -# NOTE: This is no more used for setting up users MAIL environment variable -# which is, starting from shadow 4.0.12-1 in Debian, entirely the -# job of the pam_mail PAM modules -# See default PAM configuration files provided for -# login, su, etc. -# -# This is a temporary situation: setting these variables will soon -# move to /etc/default/useradd and the variables will then be -# no more supported -MAIL_DIR /var/mail -#MAIL_FILE .mail - -# -# Enable logging and display of /var/log/faillog login failure info. -# This option conflicts with the pam_tally PAM module. -# -FAILLOG_ENAB yes - -# -# Enable display of unknown usernames when login failures are recorded. -# -# WARNING: Unknown usernames may become world readable. 
-# See #290803 and #298773 for details about how this could become a security -# concern -LOG_UNKFAIL_ENAB no - -# -# Enable logging of successful logins -# -LOG_OK_LOGINS no - -# -# Enable "syslog" logging of su activity - in addition to sulog file logging. -# SYSLOG_SG_ENAB does the same for newgrp and sg. -# -SYSLOG_SU_ENAB yes -SYSLOG_SG_ENAB yes - -# -# If defined, all su activity is logged to this file. -# -#SULOG_FILE /var/log/sulog - -# -# If defined, file which maps tty line to TERM environment parameter. -# Each line of the file is in a format something like "vt100 tty01". -# -#TTYTYPE_FILE /etc/ttytype - -# -# If defined, login failures will be logged here in a utmp format -# last, when invoked as lastb, will read /var/log/btmp, so... -# -FTMP_FILE /var/log/btmp - -# -# If defined, the command name to display when running "su -". For -# example, if this is defined as "su" then a "ps" will display the -# command is "-su". If not defined, then "ps" would display the -# name of the shell actually being run, e.g. something like "-sh". -# -SU_NAME su - -# -# If defined, file which inhibits all the usual chatter during the login -# sequence. If a full pathname, then hushed mode will be enabled if the -# user's name or shell are found in the file. If not a full pathname, then -# hushed mode will be enabled if the file exists in the user's home directory. -# -HUSHLOGIN_FILE .hushlogin -#HUSHLOGIN_FILE /etc/hushlogins - -# -# *REQUIRED* The default PATH settings, for superuser and normal users. -# -# (they are minimal, add the rest in the shell startup files) -ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games - -# -# Terminal permissions -# -# TTYGROUP Login tty will be assigned this group ownership. -# TTYPERM Login tty will be set to this permission. -# -# If you have a "write" program which is "setgid" to a special group -# which owns the terminals, define TTYGROUP to the group number and -# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign -# TTYPERM to either 622 or 600. -# -# In Debian /usr/bin/bsd-write or similar programs are setgid tty -# However, the default and recommended value for TTYPERM is still 0600 -# to not allow anyone to write to anyone else console or terminal - -# Users can still allow other people to write them by issuing -# the "mesg y" command. - -TTYGROUP tty -TTYPERM 0600 - -# -# Login configuration initializations: -# -# ERASECHAR Terminal ERASE character ('\010' = backspace). -# KILLCHAR Terminal KILL character ('\025' = CTRL/U). -# UMASK Default "umask" value. -# -# The ERASECHAR and KILLCHAR are used only on System V machines. -# -# UMASK is the default umask value for pam_umask and is used by -# useradd and newusers to set the mode of the new home directories. -# 022 is the "historical" value in Debian for UMASK -# 027, or even 077, could be considered better for privacy -# There is no One True Answer here : each sysadmin must make up his/her -# mind. -# -# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value -# for private user groups, i. e. the uid is the same as gid, and username is -# the same as the primary group name: for these, the user permissions will be -# used as group permissions, e. g. 022 will become 002. -# -# Prefix these values with "0" to get octal, "0x" to get hexadecimal. 
-# -ERASECHAR 0177 -KILLCHAR 025 -UMASK 022 - -# -# Password aging controls: -# -# PASS_MAX_DAYS Maximum number of days a password may be used. -# PASS_MIN_DAYS Minimum number of days allowed between password changes. -# PASS_WARN_AGE Number of days warning given before a password expires. -# -PASS_MAX_DAYS 99999 -PASS_MIN_DAYS 0 -PASS_WARN_AGE 7 - -# -# Min/max values for automatic uid selection in useradd -# -UID_MIN 1000 -UID_MAX 60000 -# System accounts -#SYS_UID_MIN 100 -#SYS_UID_MAX 999 - -# -# Min/max values for automatic gid selection in groupadd -# -GID_MIN 1000 -GID_MAX 60000 -# System accounts -#SYS_GID_MIN 100 -#SYS_GID_MAX 999 - -# -# Max number of login retries if password is bad. This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built -# in of 3 retries. However, this is a safe fallback in case you are using -# an authentication module that does not enforce PAM_MAXTRIES. -# -LOGIN_RETRIES 5 - -# -# Max time in seconds for login -# -LOGIN_TIMEOUT 60 - -# -# Which fields may be changed by regular users using chfn - use -# any combination of letters "frwh" (full name, room number, work -# phone, home phone). If not defined, no changes are allowed. -# For backward compatibility, "yes" = "rwh" and "no" = "frwh". -# -CHFN_RESTRICT rwh - -# -# Should login be allowed if we can't cd to the home directory? -# Default in no. -# -DEFAULT_HOME yes - -# -# If defined, this command is run when removing a user. -# It should remove any at/cron/print jobs etc. owned by -# the user to be removed (passed as the first argument). -# -#USERDEL_CMD /usr/sbin/userdel_local - -# -# If set to yes, userdel will remove the user's group if it contains no -# more members, and useradd will create by default a group with the name -# of the user. -# -# Other former uses of this variable such as setting the umask when -# user==primary group are not used in PAM environments, such as Debian -# -USERGROUPS_ENAB yes - -# -# Instead of the real user shell, the program specified by this parameter -# will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, -# banner, ...) before running the actual shell. -# -# FAKE_SHELL /bin/fakeshell - -# -# If defined, either full pathname of a file containing device names or -# a ":" delimited list of device names. Root logins will be allowed only -# upon these devices. -# -# This variable is used by login and su. -# -#CONSOLE /etc/consoles -#CONSOLE console:tty01:tty02:tty03:tty04 - -# -# List of groups to add to the user's supplementary group set -# when logging in on the console (as determined by the CONSOLE -# setting). Default is none. -# -# Use with caution - it is possible for users to gain permanent -# access to these groups, even when not logged in on the console. -# How to do it is left as an exercise for the reader... -# -# This variable is used by login and su. -# -#CONSOLE_GROUPS floppy:audio:cdrom - -# -# If set to "yes", new passwords will be encrypted using the MD5-based -# algorithm compatible with the one used by recent releases of FreeBSD. -# It supports passwords of unlimited length and longer salt strings. -# Set to "no" if you need to copy encrypted passwords to other systems -# which don't understand the new algorithm. Default is "no". -# -# This variable is deprecated. You should use ENCRYPT_METHOD. 
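The aging knobs shown above (PASS_MAX_DAYS, PASS_MIN_DAYS, PASS_WARN_AGE) are the values a password-expiration policy would need to touch in login.defs. A purely hypothetical sketch of such a rewrite, assuming an expiration expressed in days; the parameter names are not taken from hostcfgd.

def apply_password_aging(login_defs_text, expiration_days=None, warn_days=7):
    # 99999 keeps the stock "no expiration" default when no policy is set.
    replacements = {
        'PASS_MAX_DAYS': str(expiration_days) if expiration_days else '99999',
        'PASS_WARN_AGE': str(warn_days),
    }
    out = []
    for line in login_defs_text.splitlines():
        fields = line.split()
        if fields and fields[0] in replacements:
            line = '{}\t{}'.format(fields[0], replacements[fields[0]])
        out.append(line)
    return '\n'.join(out)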
-# -#MD5_CRYPT_ENAB no - -# -# If set to MD5 , MD5-based algorithm will be used for encrypting password -# If set to SHA256, SHA256-based algorithm will be used for encrypting password -# If set to SHA512, SHA512-based algorithm will be used for encrypting password -# If set to DES, DES-based algorithm will be used for encrypting password (default) -# Overrides the MD5_CRYPT_ENAB option -# -# Note: It is recommended to use a value consistent with -# the PAM modules configuration. -# -ENCRYPT_METHOD SHA512 - -# -# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. -# -# Define the number of SHA rounds. -# With a lot of rounds, it is more difficult to brute forcing the password. -# But note also that it more CPU resources will be needed to authenticate -# users. -# -# If not specified, the libc will choose the default number of rounds (5000). -# The values must be inside the 1000-999999999 range. -# If only one of the MIN or MAX values is set, then this value will be used. -# If MIN > MAX, the highest value will be used. -# -# SHA_CRYPT_MIN_ROUNDS 5000 -# SHA_CRYPT_MAX_ROUNDS 5000 - -################# OBSOLETED BY PAM ############## -# # -# These options are now handled by PAM. Please # -# edit the appropriate file in /etc/pam.d/ to # -# enable the equivelants of them. -# -############### - -#MOTD_FILE -#DIALUPS_CHECK_ENAB -#LASTLOG_ENAB -#MAIL_CHECK_ENAB -#OBSCURE_CHECKS_ENAB -#PORTTIME_CHECKS_ENAB -#SU_WHEEL_ONLY -#CRACKLIB_DICTPATH -#PASS_CHANGE_TRIES -#PASS_ALWAYS_WARN -#ENVIRON_FILE -#NOLOGINS_FILE -#ISSUE_FILE -#PASS_MIN_LEN -#PASS_MAX_LEN -#ULIMIT -#ENV_HZ -#CHFN_AUTH -#CHSH_AUTH -#FAIL_DELAY - -################# OBSOLETED ####################### -# # -# These options are no more handled by shadow. # -# # -# Shadow utilities will display a warning if they # -# still appear. # -# # -################################################### - -# CLOSE_SESSIONS -# LOGIN_STRING -# NO_PASSWORD_CONSOLE -# QMAIL_DIR - - - diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_digits_class/common-password b/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_digits_class/common-password deleted file mode 100644 index 841074728a..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_digits_class/common-password +++ /dev/null @@ -1,39 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-password - password-related modules common to all services -# -# This file is included from other service-specific PAM config files, -# and should contain a list of modules that define the services to be -# used to change user passwords. The default is pam_unix. - -# Explanation of pam_unix options: -# The "yescrypt" option enables -#hashed passwords using the yescrypt algorithm, introduced in Debian -#11. Without this option, the default is Unix crypt. Prior releases -#used the option "sha512"; if a shadow password hash will be shared -#between Debian 11 and older releases replace "yescrypt" with "sha512" -#for compatibility . The "obscure" option replaces the old -#`OBSCURE_CHECKS_ENAB' option in login.defs. See the pam_unix manpage -#for other options. - -# As of pam 1.0.1-6, this file is managed by pam-auth-update by default. -# To take advantage of this, it is recommended that you configure any -# local modules either before or after the default block, and use -# pam-auth-update to manage selection of other modules. See -# pam-auth-update(8) for details. 
- -# here are the per-package modules (the "Primary" block) - -password requisite pam_cracklib.so retry=3 maxrepeat=0 minlen=8 ucredit=0 lcredit=0 dcredit=-1 ocredit=0 enforce_for_root - -password required pam_pwhistory.so remember=0 use_authtok enforce_for_root - -password [success=1 default=ignore] pam_unix.so obscure yescrypt -# here's the fallback if no module succeeds -password requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -password required pam_permit.so -# and here are more per-package modules (the "Additional" block) -# end of pam-auth-update config \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_digits_class/login.defs b/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_digits_class/login.defs deleted file mode 100644 index db8baa4d2b..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_digits_class/login.defs +++ /dev/null @@ -1,340 +0,0 @@ -# -# /etc/login.defs - Configuration control definitions for the login package. -# -# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. -# If unspecified, some arbitrary (and possibly incorrect) value will -# be assumed. All other items are optional - if not specified then -# the described action or option will be inhibited. -# -# Comment lines (lines beginning with "#") and blank lines are ignored. -# -# Modified for Linux. --marekm - -# REQUIRED for useradd/userdel/usermod -# Directory where mailboxes reside, _or_ name of file, relative to the -# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, -# MAIL_DIR takes precedence. -# -# Essentially: -# - MAIL_DIR defines the location of users mail spool files -# (for mbox use) by appending the username to MAIL_DIR as defined -# below. -# - MAIL_FILE defines the location of the users mail spool files as the -# fully-qualified filename obtained by prepending the user home -# directory before $MAIL_FILE -# -# NOTE: This is no more used for setting up users MAIL environment variable -# which is, starting from shadow 4.0.12-1 in Debian, entirely the -# job of the pam_mail PAM modules -# See default PAM configuration files provided for -# login, su, etc. -# -# This is a temporary situation: setting these variables will soon -# move to /etc/default/useradd and the variables will then be -# no more supported -MAIL_DIR /var/mail -#MAIL_FILE .mail - -# -# Enable logging and display of /var/log/faillog login failure info. -# This option conflicts with the pam_tally PAM module. -# -FAILLOG_ENAB yes - -# -# Enable display of unknown usernames when login failures are recorded. -# -# WARNING: Unknown usernames may become world readable. -# See #290803 and #298773 for details about how this could become a security -# concern -LOG_UNKFAIL_ENAB no - -# -# Enable logging of successful logins -# -LOG_OK_LOGINS no - -# -# Enable "syslog" logging of su activity - in addition to sulog file logging. -# SYSLOG_SG_ENAB does the same for newgrp and sg. -# -SYSLOG_SU_ENAB yes -SYSLOG_SG_ENAB yes - -# -# If defined, all su activity is logged to this file. -# -#SULOG_FILE /var/log/sulog - -# -# If defined, file which maps tty line to TERM environment parameter. -# Each line of the file is in a format something like "vt100 tty01". 
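The enable_digits_class sample above adds a pam_cracklib line in which dcredit=-1 requires at least one digit, while the other *credit options stay at 0 (unchecked). A sketch of that mapping from a hardening policy to the option string; the policy field names are assumptions for illustration.

def cracklib_options(policy):
    # True means the character class is enforced; -1 requires at least one
    # character of that class, 0 leaves it unchecked.
    creds = {
        'ucredit': policy.get('upper_class', False),
        'lcredit': policy.get('lower_class', False),
        'dcredit': policy.get('digits_class', False),
        'ocredit': policy.get('special_class', False),
    }
    opts = ['retry=3', 'maxrepeat=0', 'minlen={}'.format(policy.get('len_min', 8))]
    opts += ['{}={}'.format(k, -1 if required else 0) for k, required in creds.items()]
    opts.append('enforce_for_root')
    return 'password requisite pam_cracklib.so ' + ' '.join(opts)

# With only digits_class enabled this reproduces the dcredit=-1 line shown above.
print(cracklib_options({'digits_class': True}))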
-# -#TTYTYPE_FILE /etc/ttytype - -# -# If defined, login failures will be logged here in a utmp format -# last, when invoked as lastb, will read /var/log/btmp, so... -# -FTMP_FILE /var/log/btmp - -# -# If defined, the command name to display when running "su -". For -# example, if this is defined as "su" then a "ps" will display the -# command is "-su". If not defined, then "ps" would display the -# name of the shell actually being run, e.g. something like "-sh". -# -SU_NAME su - -# -# If defined, file which inhibits all the usual chatter during the login -# sequence. If a full pathname, then hushed mode will be enabled if the -# user's name or shell are found in the file. If not a full pathname, then -# hushed mode will be enabled if the file exists in the user's home directory. -# -HUSHLOGIN_FILE .hushlogin -#HUSHLOGIN_FILE /etc/hushlogins - -# -# *REQUIRED* The default PATH settings, for superuser and normal users. -# -# (they are minimal, add the rest in the shell startup files) -ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games - -# -# Terminal permissions -# -# TTYGROUP Login tty will be assigned this group ownership. -# TTYPERM Login tty will be set to this permission. -# -# If you have a "write" program which is "setgid" to a special group -# which owns the terminals, define TTYGROUP to the group number and -# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign -# TTYPERM to either 622 or 600. -# -# In Debian /usr/bin/bsd-write or similar programs are setgid tty -# However, the default and recommended value for TTYPERM is still 0600 -# to not allow anyone to write to anyone else console or terminal - -# Users can still allow other people to write them by issuing -# the "mesg y" command. - -TTYGROUP tty -TTYPERM 0600 - -# -# Login configuration initializations: -# -# ERASECHAR Terminal ERASE character ('\010' = backspace). -# KILLCHAR Terminal KILL character ('\025' = CTRL/U). -# UMASK Default "umask" value. -# -# The ERASECHAR and KILLCHAR are used only on System V machines. -# -# UMASK is the default umask value for pam_umask and is used by -# useradd and newusers to set the mode of the new home directories. -# 022 is the "historical" value in Debian for UMASK -# 027, or even 077, could be considered better for privacy -# There is no One True Answer here : each sysadmin must make up his/her -# mind. -# -# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value -# for private user groups, i. e. the uid is the same as gid, and username is -# the same as the primary group name: for these, the user permissions will be -# used as group permissions, e. g. 022 will become 002. -# -# Prefix these values with "0" to get octal, "0x" to get hexadecimal. -# -ERASECHAR 0177 -KILLCHAR 025 -UMASK 022 - -# -# Password aging controls: -# -# PASS_MAX_DAYS Maximum number of days a password may be used. -# PASS_MIN_DAYS Minimum number of days allowed between password changes. -# PASS_WARN_AGE Number of days warning given before a password expires. -# -PASS_MAX_DAYS 99999 -PASS_MIN_DAYS 0 -PASS_WARN_AGE 7 - -# -# Min/max values for automatic uid selection in useradd -# -UID_MIN 1000 -UID_MAX 60000 -# System accounts -#SYS_UID_MIN 100 -#SYS_UID_MAX 999 - -# -# Min/max values for automatic gid selection in groupadd -# -GID_MIN 1000 -GID_MAX 60000 -# System accounts -#SYS_GID_MIN 100 -#SYS_GID_MAX 999 - -# -# Max number of login retries if password is bad. 
This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built -# in of 3 retries. However, this is a safe fallback in case you are using -# an authentication module that does not enforce PAM_MAXTRIES. -# -LOGIN_RETRIES 5 - -# -# Max time in seconds for login -# -LOGIN_TIMEOUT 60 - -# -# Which fields may be changed by regular users using chfn - use -# any combination of letters "frwh" (full name, room number, work -# phone, home phone). If not defined, no changes are allowed. -# For backward compatibility, "yes" = "rwh" and "no" = "frwh". -# -CHFN_RESTRICT rwh - -# -# Should login be allowed if we can't cd to the home directory? -# Default in no. -# -DEFAULT_HOME yes - -# -# If defined, this command is run when removing a user. -# It should remove any at/cron/print jobs etc. owned by -# the user to be removed (passed as the first argument). -# -#USERDEL_CMD /usr/sbin/userdel_local - -# -# If set to yes, userdel will remove the user's group if it contains no -# more members, and useradd will create by default a group with the name -# of the user. -# -# Other former uses of this variable such as setting the umask when -# user==primary group are not used in PAM environments, such as Debian -# -USERGROUPS_ENAB yes - -# -# Instead of the real user shell, the program specified by this parameter -# will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, -# banner, ...) before running the actual shell. -# -# FAKE_SHELL /bin/fakeshell - -# -# If defined, either full pathname of a file containing device names or -# a ":" delimited list of device names. Root logins will be allowed only -# upon these devices. -# -# This variable is used by login and su. -# -#CONSOLE /etc/consoles -#CONSOLE console:tty01:tty02:tty03:tty04 - -# -# List of groups to add to the user's supplementary group set -# when logging in on the console (as determined by the CONSOLE -# setting). Default is none. -# -# Use with caution - it is possible for users to gain permanent -# access to these groups, even when not logged in on the console. -# How to do it is left as an exercise for the reader... -# -# This variable is used by login and su. -# -#CONSOLE_GROUPS floppy:audio:cdrom - -# -# If set to "yes", new passwords will be encrypted using the MD5-based -# algorithm compatible with the one used by recent releases of FreeBSD. -# It supports passwords of unlimited length and longer salt strings. -# Set to "no" if you need to copy encrypted passwords to other systems -# which don't understand the new algorithm. Default is "no". -# -# This variable is deprecated. You should use ENCRYPT_METHOD. -# -#MD5_CRYPT_ENAB no - -# -# If set to MD5 , MD5-based algorithm will be used for encrypting password -# If set to SHA256, SHA256-based algorithm will be used for encrypting password -# If set to SHA512, SHA512-based algorithm will be used for encrypting password -# If set to DES, DES-based algorithm will be used for encrypting password (default) -# Overrides the MD5_CRYPT_ENAB option -# -# Note: It is recommended to use a value consistent with -# the PAM modules configuration. -# -ENCRYPT_METHOD SHA512 - -# -# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. -# -# Define the number of SHA rounds. -# With a lot of rounds, it is more difficult to brute forcing the password. -# But note also that it more CPU resources will be needed to authenticate -# users. 
-# -# If not specified, the libc will choose the default number of rounds (5000). -# The values must be inside the 1000-999999999 range. -# If only one of the MIN or MAX values is set, then this value will be used. -# If MIN > MAX, the highest value will be used. -# -# SHA_CRYPT_MIN_ROUNDS 5000 -# SHA_CRYPT_MAX_ROUNDS 5000 - -################# OBSOLETED BY PAM ############## -# # -# These options are now handled by PAM. Please # -# edit the appropriate file in /etc/pam.d/ to # -# enable the equivelants of them. -# -############### - -#MOTD_FILE -#DIALUPS_CHECK_ENAB -#LASTLOG_ENAB -#MAIL_CHECK_ENAB -#OBSCURE_CHECKS_ENAB -#PORTTIME_CHECKS_ENAB -#SU_WHEEL_ONLY -#CRACKLIB_DICTPATH -#PASS_CHANGE_TRIES -#PASS_ALWAYS_WARN -#ENVIRON_FILE -#NOLOGINS_FILE -#ISSUE_FILE -#PASS_MIN_LEN -#PASS_MAX_LEN -#ULIMIT -#ENV_HZ -#CHFN_AUTH -#CHSH_AUTH -#FAIL_DELAY - -################# OBSOLETED ####################### -# # -# These options are no more handled by shadow. # -# # -# Shadow utilities will display a warning if they # -# still appear. # -# # -################################################### - -# CLOSE_SESSIONS -# LOGIN_STRING -# NO_PASSWORD_CONSOLE -# QMAIL_DIR - - - diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_feature/common-password b/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_feature/common-password deleted file mode 100644 index a66c1b1ade..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_feature/common-password +++ /dev/null @@ -1,39 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-password - password-related modules common to all services -# -# This file is included from other service-specific PAM config files, -# and should contain a list of modules that define the services to be -# used to change user passwords. The default is pam_unix. - -# Explanation of pam_unix options: -# The "yescrypt" option enables -#hashed passwords using the yescrypt algorithm, introduced in Debian -#11. Without this option, the default is Unix crypt. Prior releases -#used the option "sha512"; if a shadow password hash will be shared -#between Debian 11 and older releases replace "yescrypt" with "sha512" -#for compatibility . The "obscure" option replaces the old -#`OBSCURE_CHECKS_ENAB' option in login.defs. See the pam_unix manpage -#for other options. - -# As of pam 1.0.1-6, this file is managed by pam-auth-update by default. -# To take advantage of this, it is recommended that you configure any -# local modules either before or after the default block, and use -# pam-auth-update to manage selection of other modules. See -# pam-auth-update(8) for details. 
- -# here are the per-package modules (the "Primary" block) - -password requisite pam_cracklib.so retry=3 maxrepeat=0 minlen=8 ucredit=-1 lcredit=-1 dcredit=-1 ocredit=-1 reject_username enforce_for_root - -password required pam_pwhistory.so remember=10 use_authtok enforce_for_root - -password [success=1 default=ignore] pam_unix.so obscure yescrypt -# here's the fallback if no module succeeds -password requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -password required pam_permit.so -# and here are more per-package modules (the "Additional" block) -# end of pam-auth-update config \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_feature/login.defs b/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_feature/login.defs deleted file mode 100644 index 1c8b360a14..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/PASSWORD_HARDENING_enable_feature/login.defs +++ /dev/null @@ -1,340 +0,0 @@ -# -# /etc/login.defs - Configuration control definitions for the login package. -# -# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. -# If unspecified, some arbitrary (and possibly incorrect) value will -# be assumed. All other items are optional - if not specified then -# the described action or option will be inhibited. -# -# Comment lines (lines beginning with "#") and blank lines are ignored. -# -# Modified for Linux. --marekm - -# REQUIRED for useradd/userdel/usermod -# Directory where mailboxes reside, _or_ name of file, relative to the -# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, -# MAIL_DIR takes precedence. -# -# Essentially: -# - MAIL_DIR defines the location of users mail spool files -# (for mbox use) by appending the username to MAIL_DIR as defined -# below. -# - MAIL_FILE defines the location of the users mail spool files as the -# fully-qualified filename obtained by prepending the user home -# directory before $MAIL_FILE -# -# NOTE: This is no more used for setting up users MAIL environment variable -# which is, starting from shadow 4.0.12-1 in Debian, entirely the -# job of the pam_mail PAM modules -# See default PAM configuration files provided for -# login, su, etc. -# -# This is a temporary situation: setting these variables will soon -# move to /etc/default/useradd and the variables will then be -# no more supported -MAIL_DIR /var/mail -#MAIL_FILE .mail - -# -# Enable logging and display of /var/log/faillog login failure info. -# This option conflicts with the pam_tally PAM module. -# -FAILLOG_ENAB yes - -# -# Enable display of unknown usernames when login failures are recorded. -# -# WARNING: Unknown usernames may become world readable. -# See #290803 and #298773 for details about how this could become a security -# concern -LOG_UNKFAIL_ENAB no - -# -# Enable logging of successful logins -# -LOG_OK_LOGINS no - -# -# Enable "syslog" logging of su activity - in addition to sulog file logging. -# SYSLOG_SG_ENAB does the same for newgrp and sg. -# -SYSLOG_SU_ENAB yes -SYSLOG_SG_ENAB yes - -# -# If defined, all su activity is logged to this file. -# -#SULOG_FILE /var/log/sulog - -# -# If defined, file which maps tty line to TERM environment parameter. -# Each line of the file is in a format something like "vt100 tty01". 
-# -#TTYTYPE_FILE /etc/ttytype - -# -# If defined, login failures will be logged here in a utmp format -# last, when invoked as lastb, will read /var/log/btmp, so... -# -FTMP_FILE /var/log/btmp - -# -# If defined, the command name to display when running "su -". For -# example, if this is defined as "su" then a "ps" will display the -# command is "-su". If not defined, then "ps" would display the -# name of the shell actually being run, e.g. something like "-sh". -# -SU_NAME su - -# -# If defined, file which inhibits all the usual chatter during the login -# sequence. If a full pathname, then hushed mode will be enabled if the -# user's name or shell are found in the file. If not a full pathname, then -# hushed mode will be enabled if the file exists in the user's home directory. -# -HUSHLOGIN_FILE .hushlogin -#HUSHLOGIN_FILE /etc/hushlogins - -# -# *REQUIRED* The default PATH settings, for superuser and normal users. -# -# (they are minimal, add the rest in the shell startup files) -ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games - -# -# Terminal permissions -# -# TTYGROUP Login tty will be assigned this group ownership. -# TTYPERM Login tty will be set to this permission. -# -# If you have a "write" program which is "setgid" to a special group -# which owns the terminals, define TTYGROUP to the group number and -# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign -# TTYPERM to either 622 or 600. -# -# In Debian /usr/bin/bsd-write or similar programs are setgid tty -# However, the default and recommended value for TTYPERM is still 0600 -# to not allow anyone to write to anyone else console or terminal - -# Users can still allow other people to write them by issuing -# the "mesg y" command. - -TTYGROUP tty -TTYPERM 0600 - -# -# Login configuration initializations: -# -# ERASECHAR Terminal ERASE character ('\010' = backspace). -# KILLCHAR Terminal KILL character ('\025' = CTRL/U). -# UMASK Default "umask" value. -# -# The ERASECHAR and KILLCHAR are used only on System V machines. -# -# UMASK is the default umask value for pam_umask and is used by -# useradd and newusers to set the mode of the new home directories. -# 022 is the "historical" value in Debian for UMASK -# 027, or even 077, could be considered better for privacy -# There is no One True Answer here : each sysadmin must make up his/her -# mind. -# -# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value -# for private user groups, i. e. the uid is the same as gid, and username is -# the same as the primary group name: for these, the user permissions will be -# used as group permissions, e. g. 022 will become 002. -# -# Prefix these values with "0" to get octal, "0x" to get hexadecimal. -# -ERASECHAR 0177 -KILLCHAR 025 -UMASK 022 - -# -# Password aging controls: -# -# PASS_MAX_DAYS Maximum number of days a password may be used. -# PASS_MIN_DAYS Minimum number of days allowed between password changes. -# PASS_WARN_AGE Number of days warning given before a password expires. -# -PASS_MAX_DAYS 180 -PASS_MIN_DAYS 0 -PASS_WARN_AGE 15 - -# -# Min/max values for automatic uid selection in useradd -# -UID_MIN 1000 -UID_MAX 60000 -# System accounts -#SYS_UID_MIN 100 -#SYS_UID_MAX 999 - -# -# Min/max values for automatic gid selection in groupadd -# -GID_MIN 1000 -GID_MAX 60000 -# System accounts -#SYS_GID_MIN 100 -#SYS_GID_MAX 999 - -# -# Max number of login retries if password is bad. 
This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built -# in of 3 retries. However, this is a safe fallback in case you are using -# an authentication module that does not enforce PAM_MAXTRIES. -# -LOGIN_RETRIES 5 - -# -# Max time in seconds for login -# -LOGIN_TIMEOUT 60 - -# -# Which fields may be changed by regular users using chfn - use -# any combination of letters "frwh" (full name, room number, work -# phone, home phone). If not defined, no changes are allowed. -# For backward compatibility, "yes" = "rwh" and "no" = "frwh". -# -CHFN_RESTRICT rwh - -# -# Should login be allowed if we can't cd to the home directory? -# Default in no. -# -DEFAULT_HOME yes - -# -# If defined, this command is run when removing a user. -# It should remove any at/cron/print jobs etc. owned by -# the user to be removed (passed as the first argument). -# -#USERDEL_CMD /usr/sbin/userdel_local - -# -# If set to yes, userdel will remove the user's group if it contains no -# more members, and useradd will create by default a group with the name -# of the user. -# -# Other former uses of this variable such as setting the umask when -# user==primary group are not used in PAM environments, such as Debian -# -USERGROUPS_ENAB yes - -# -# Instead of the real user shell, the program specified by this parameter -# will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, -# banner, ...) before running the actual shell. -# -# FAKE_SHELL /bin/fakeshell - -# -# If defined, either full pathname of a file containing device names or -# a ":" delimited list of device names. Root logins will be allowed only -# upon these devices. -# -# This variable is used by login and su. -# -#CONSOLE /etc/consoles -#CONSOLE console:tty01:tty02:tty03:tty04 - -# -# List of groups to add to the user's supplementary group set -# when logging in on the console (as determined by the CONSOLE -# setting). Default is none. -# -# Use with caution - it is possible for users to gain permanent -# access to these groups, even when not logged in on the console. -# How to do it is left as an exercise for the reader... -# -# This variable is used by login and su. -# -#CONSOLE_GROUPS floppy:audio:cdrom - -# -# If set to "yes", new passwords will be encrypted using the MD5-based -# algorithm compatible with the one used by recent releases of FreeBSD. -# It supports passwords of unlimited length and longer salt strings. -# Set to "no" if you need to copy encrypted passwords to other systems -# which don't understand the new algorithm. Default is "no". -# -# This variable is deprecated. You should use ENCRYPT_METHOD. -# -#MD5_CRYPT_ENAB no - -# -# If set to MD5 , MD5-based algorithm will be used for encrypting password -# If set to SHA256, SHA256-based algorithm will be used for encrypting password -# If set to SHA512, SHA512-based algorithm will be used for encrypting password -# If set to DES, DES-based algorithm will be used for encrypting password (default) -# Overrides the MD5_CRYPT_ENAB option -# -# Note: It is recommended to use a value consistent with -# the PAM modules configuration. -# -ENCRYPT_METHOD SHA512 - -# -# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. -# -# Define the number of SHA rounds. -# With a lot of rounds, it is more difficult to brute forcing the password. -# But note also that it more CPU resources will be needed to authenticate -# users. 
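Editor's note: in the PASSWORD_HARDENING_enable_feature login.defs above, the aging knobs differ from the digits-class fixture, PASS_MAX_DAYS is 180 and PASS_WARN_AGE is 15 instead of the shipped 99999 and 7. Below is a hedged, stand-alone sketch of rewriting those two keys in place; the policy field names ("expiration", "expiration_warning") are again assumptions, not necessarily the real CONFIG_DB fields.

import re

# Sketch: rewrite PASS_MAX_DAYS / PASS_WARN_AGE in login.defs-style text.
# Policy key names are illustrative assumptions.
def apply_aging(login_defs_text, policy):
    values = {
        "PASS_MAX_DAYS": policy.get("expiration", 99999),
        "PASS_WARN_AGE": policy.get("expiration_warning", 7),
    }
    out = login_defs_text
    for key, value in values.items():
        # Replace the whole "KEY<whitespace>VALUE" line, leaving the rest intact.
        out = re.sub(r"(?m)^%s\s+\S+" % key, "%s\t%s" % (key, value), out)
    return out

sample = "PASS_MAX_DAYS\t99999\nPASS_MIN_DAYS\t0\nPASS_WARN_AGE\t7\n"
print(apply_aging(sample, {"expiration": 180, "expiration_warning": 15}))
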
-# -# If not specified, the libc will choose the default number of rounds (5000). -# The values must be inside the 1000-999999999 range. -# If only one of the MIN or MAX values is set, then this value will be used. -# If MIN > MAX, the highest value will be used. -# -# SHA_CRYPT_MIN_ROUNDS 5000 -# SHA_CRYPT_MAX_ROUNDS 5000 - -################# OBSOLETED BY PAM ############## -# # -# These options are now handled by PAM. Please # -# edit the appropriate file in /etc/pam.d/ to # -# enable the equivelants of them. -# -############### - -#MOTD_FILE -#DIALUPS_CHECK_ENAB -#LASTLOG_ENAB -#MAIL_CHECK_ENAB -#OBSCURE_CHECKS_ENAB -#PORTTIME_CHECKS_ENAB -#SU_WHEEL_ONLY -#CRACKLIB_DICTPATH -#PASS_CHANGE_TRIES -#PASS_ALWAYS_WARN -#ENVIRON_FILE -#NOLOGINS_FILE -#ISSUE_FILE -#PASS_MIN_LEN -#PASS_MAX_LEN -#ULIMIT -#ENV_HZ -#CHFN_AUTH -#CHSH_AUTH -#FAIL_DELAY - -################# OBSOLETED ####################### -# # -# These options are no more handled by shadow. # -# # -# Shadow utilities will display a warning if they # -# still appear. # -# # -################################################### - -# CLOSE_SESSIONS -# LOGIN_STRING -# NO_PASSWORD_CONSOLE -# QMAIL_DIR - - - diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/10.10.10.1_1645.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/10.10.10.1_1645.conf deleted file mode 100644 index ffdda7b401..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/10.10.10.1_1645.conf +++ /dev/null @@ -1,2 +0,0 @@ -# server[:port] shared_secret timeout(s) source_ip vrf -[10.10.10.1]:1645 pass1 1 \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/10.10.10.2_1645.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/10.10.10.2_1645.conf deleted file mode 100644 index 3b680807cb..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/10.10.10.2_1645.conf +++ /dev/null @@ -1,2 +0,0 @@ -# server[:port] shared_secret timeout(s) source_ip vrf -[10.10.10.2]:1645 pass2 2 \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/common-auth-sonic b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/common-auth-sonic deleted file mode 100644 index d6b148633e..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/common-auth-sonic +++ /dev/null @@ -1,30 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-auth- authentication settings common to all services -# This file is included from other service-specific PAM config files, -# and should contain a list of the authentication modules that define -# the central authentication scheme for use on the system -# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the -# traditional Unix authentication mechanisms. -# -# here are the per-package modules (the "Primary" block) - -# root user can only be authenticated locally. Jump to local. 
-auth [success=2 default=ignore] pam_succeed_if.so user = root -# For the RADIUS servers, on success jump to the cache the MPL(Privilege) -auth [success=3 new_authtok_reqd=done default=ignore auth_err=die] pam_radius_auth.so conf=/etc/pam_radius_auth.d/10.10.10.1_1645.conf privilege_level protocol=pap retry=1 nas_ip_address=10.10.10.10 debug try_first_pass -auth [success=2 new_authtok_reqd=done default=ignore auth_err=die] pam_radius_auth.so conf=/etc/pam_radius_auth.d/10.10.10.2_1645.conf privilege_level protocol=chap retry=2 nas_ip_address=10.10.10.10 debug try_first_pass -# Local -auth [success=done new_authtok_reqd=done default=ignore auth_err=die maxtries=die] pam_unix.so nullok try_first_pass -auth requisite pam_deny.so -# Cache MPL(Privilege) -auth [success=1 default=ignore] pam_exec.so /usr/sbin/cache_radius - -# -# here's the fallback if no module succeeds -auth requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -auth required pam_permit.so -# and here are more per-package modules (the "Additional" block) diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/login b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/login deleted file mode 100644 index 80ba645281..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/login +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) 
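Editor's note on the RADIUS common-auth-sonic fixture above: the stack relies on PAM's numeric jump controls. With two servers, the root-only pam_succeed_if line uses success=2 to skip both pam_radius_auth lines and fall through to local pam_unix, while server i of N uses success=(N - i) + 2 so that a successful RADIUS reply skips the remaining servers plus the pam_unix/pam_deny pair and lands on the cache_radius pam_exec line, whose own success=1 skips the fallback pam_deny. The per-server conf path pattern (<ip>_<port>.conf under /etc/pam_radius_auth.d/) is taken from the fixture filenames. The snippet below only reproduces that arithmetic for illustration; it is not the generator hostcfgd actually uses.

# Sketch: rebuild the jump-based auth stack from the RADIUS common-auth-sonic
# fixture for an ordered server list. Illustration only, not the real generator.
def radius_auth_stack(servers, nas_ip):
    n = len(servers)
    # Root is authenticated locally: skip every pam_radius_auth line below.
    lines = ["auth [success=%d default=ignore] pam_succeed_if.so user = root" % n]
    for i, (ip, port, proto, retry) in enumerate(servers, start=1):
        # On success, skip the remaining servers plus pam_unix and pam_deny,
        # landing on the cache_radius line: (n - i) + 2.
        jump = (n - i) + 2
        conf = "/etc/pam_radius_auth.d/%s_%s.conf" % (ip, port)
        lines.append("auth [success=%d new_authtok_reqd=done default=ignore "
                     "auth_err=die] pam_radius_auth.so conf=%s privilege_level "
                     "protocol=%s retry=%d nas_ip_address=%s debug try_first_pass"
                     % (jump, conf, proto, retry, nas_ip))
    lines += [
        "# Local",
        "auth [success=done new_authtok_reqd=done default=ignore auth_err=die "
        "maxtries=die] pam_unix.so nullok try_first_pass",
        "auth requisite pam_deny.so",
        "# Cache MPL(Privilege); success=1 skips the fallback pam_deny",
        "auth [success=1 default=ignore] pam_exec.so /usr/sbin/cache_radius",
        "auth requisite pam_deny.so",
        "auth required pam_permit.so",
    ]
    return "\n".join(lines)

print(radius_auth_stack([("10.10.10.1", 1645, "pap", 1),
                         ("10.10.10.2", 1645, "chap", 2)], "10.10.10.10"))
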
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth-sonic - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. -# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/login.old b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/login.old deleted file mode 100644 index 07ff95407c..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/login.old +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. 
(for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. 
-# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/radius_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/radius_nss.conf deleted file mode 100644 index 8c31db9fba..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/radius_nss.conf +++ /dev/null @@ -1,56 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2 -# RADIUS NSS Configuration File -# -# Debug: on|off|trace -# Default: off -# -# debug=on -debug=on - -# -# User Privilege: -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell - -# Eg: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell -# - -# many_to_one: -# y: Map RADIUS users to one local user per privilege. -# n: Create local user account on first successful authentication. -# Default: n -# - -# Eg: -# many_to_one=y -# - -# unconfirmed_disallow: -# y: Do not allow unconfirmed users (users created before authentication) -# n: Allow unconfirmed users. -# Default: n - -# Eg: -# unconfirmed_disallow=y -# - -# unconfirmed_ageout: -# : Wait time before purging unconfirmed users -# Default: 600 -# - -# Eg: -# unconfirmed_ageout=900 -# - -# unconfirmed_regexp: -# : The RE to match the command line of processes for which the -# creation of unconfirmed users are to be allowed. -# Default: (.*: \[priv\])|(.*: \[accepted\]) -# where: is the unconfirmed user. -# \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/sshd b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/sshd deleted file mode 100644 index c025af353d..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/sshd +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. 
-@include common-auth-sonic - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/sshd.old b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/sshd.old deleted file mode 100644 index d70b384bd9..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/sshd.old +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. 
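Editor's note: the RADIUS sshd fixture and its sshd.old counterpart above differ only in the auth include, the active file pulls in common-auth-sonic while the preserved .old copy keeps the stock @include common-auth (the same pattern holds for the login/login.old pairs). A one-line substitution expresses that rewrite; the snippet below is only an illustration of the idea, not the actual mechanism hostcfgd uses.

# Sketch: switch a PAM service file from the stock Debian auth include to the
# SONiC-managed one (the only visible delta between sshd and sshd.old here).
def use_sonic_auth(pam_text):
    return pam_text.replace("@include common-auth\n", "@include common-auth-sonic\n")

assert use_sonic_auth("@include common-auth\n") == "@include common-auth-sonic\n"
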
-session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/tacplus_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/tacplus_nss.conf deleted file mode 100644 index eac828491a..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/RADIUS/tacplus_nss.conf +++ /dev/null @@ -1,40 +0,0 @@ -# Configuration for libnss-tacplus - -# debug - If you want to open debug log, set it on -# Default: off -# debug=on -debug=on - -# local_accounting - If you want to local accounting, set it -# Default: None -# local_accounting - -# tacacs_accounting - If you want to tacacs+ accounting, set it -# Default: None -# tacacs_accounting - -# local_authorization - If you want to local authorization, set it -# Default: None -# local_authorization -local_authorization - -# tacacs_authorization - If you want to tacacs+ authorization, set it -# Default: None -# tacacs_authorization - -# src_ip - set source address of TACACS+ protocol packets -# Default: None (auto source ip address) -# src_ip=2.2.2.2 - -# server - set ip address, tcp port, secret string and timeout for TACACS+ servers -# Default: None (no TACACS+ server) -# server=1.1.1.1:49,secret=test,timeout=3 - -# user_priv - set the map between TACACS+ user privilege and local user's passwd -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash - -# many_to_one - create one local user for many TACACS+ users which has the same privilege -# Default: many_to_one=n -# many_to_one=y diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/common-auth-sonic b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/common-auth-sonic deleted file mode 100644 index 87af4cc5c6..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/common-auth-sonic +++ /dev/null @@ -1,21 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-auth- authentication settings common to all services -# This file is included from other service-specific PAM config files, -# and should contain a list of the authentication modules that define -# the central authentication scheme for use on the system -# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the -# traditional Unix authentication mechanisms. 
-# -# here are the per-package modules (the "Primary" block) - -auth [success=1 default=ignore] pam_unix.so nullok try_first_pass - -# -# here's the fallback if no module succeeds -auth requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -auth required pam_permit.so -# and here are more per-package modules (the "Additional" block) diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/login b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/login deleted file mode 100644 index 80ba645281..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/login +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) 
- -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth-sonic - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. -# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/login.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/login.old deleted file mode 100644 index 07ff95407c..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/login.old +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). 
Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. 
-# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/radius_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/radius_nss.conf deleted file mode 100644 index 1567b6e645..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/radius_nss.conf +++ /dev/null @@ -1,55 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2 -# RADIUS NSS Configuration File -# -# Debug: on|off|trace -# Default: off -# -# debug=on - -# -# User Privilege: -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell - -# Eg: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell -# - -# many_to_one: -# y: Map RADIUS users to one local user per privilege. -# n: Create local user account on first successful authentication. -# Default: n -# - -# Eg: -# many_to_one=y -# - -# unconfirmed_disallow: -# y: Do not allow unconfirmed users (users created before authentication) -# n: Allow unconfirmed users. -# Default: n - -# Eg: -# unconfirmed_disallow=y -# - -# unconfirmed_ageout: -# : Wait time before purging unconfirmed users -# Default: 600 -# - -# Eg: -# unconfirmed_ageout=900 -# - -# unconfirmed_regexp: -# : The RE to match the command line of processes for which the -# creation of unconfirmed users are to be allowed. -# Default: (.*: \[priv\])|(.*: \[accepted\]) -# where: is the unconfirmed user. 
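Editor's note: both radius_nss.conf fixtures document the same knobs (many_to_one, unconfirmed_disallow, unconfirmed_ageout, user_priv mappings); the only generated difference is whether an uncommented debug=on line is emitted, presumably tracking a RADIUS debug flag in the test input. A minimal sketch of that conditional rendering follows; the "debug" key name is an assumption and everything else in the fixture is static template text.

# Sketch: emit the generated portion of radius_nss.conf. Illustration only.
def radius_nss_lines(radius_global):
    lines = ["# RADIUS NSS Configuration File"]
    if radius_global.get("debug") == "on":
        lines.append("debug=on")
    else:
        lines.append("# debug=on")
    return "\n".join(lines)

print(radius_nss_lines({"debug": "on"}))
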
-# \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/sshd b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/sshd deleted file mode 100644 index c025af353d..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/sshd +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth-sonic - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/sshd.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/sshd.old deleted file mode 100644 index d70b384bd9..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/sshd.old +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/tacplus_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/tacplus_nss.conf deleted file mode 100644 index bffd6f7039..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_disable_accounting/tacplus_nss.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Configuration for libnss-tacplus - -# debug - If you want to open debug log, set it on -# Default: off -# debug=on - -# local_accounting - If you want to local accounting, set it -# Default: None -# local_accounting - -# tacacs_accounting - If you want to tacacs+ accounting, set it -# Default: None -# tacacs_accounting - -# local_authorization - If you want to local authorization, set it -# Default: None -# local_authorization -local_authorization - -# tacacs_authorization - If you want to tacacs+ authorization, set it -# Default: None -# tacacs_authorization - -# src_ip - set source address of TACACS+ protocol packets -# Default: None (auto source ip address) -# src_ip=2.2.2.2 - -# server - set ip address, tcp port, secret string and timeout for TACACS+ servers -# Default: None (no TACACS+ server) -# server=1.1.1.1:49,secret=test,timeout=3 -server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default -server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt - -# user_priv - set the map between TACACS+ user privilege and local user's passwd -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash - -# many_to_one - create one local user for many TACACS+ users which has the same privilege -# Default: many_to_one=n -# many_to_one=y diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/common-auth-sonic b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/common-auth-sonic deleted file mode 100644 index 87af4cc5c6..0000000000 --- 
a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/common-auth-sonic +++ /dev/null @@ -1,21 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-auth- authentication settings common to all services -# This file is included from other service-specific PAM config files, -# and should contain a list of the authentication modules that define -# the central authentication scheme for use on the system -# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the -# traditional Unix authentication mechanisms. -# -# here are the per-package modules (the "Primary" block) - -auth [success=1 default=ignore] pam_unix.so nullok try_first_pass - -# -# here's the fallback if no module succeeds -auth requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -auth required pam_permit.so -# and here are more per-package modules (the "Additional" block) diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/login b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/login deleted file mode 100644 index 80ba645281..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/login +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth-sonic - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. -# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/login.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/login.old deleted file mode 100644 index 07ff95407c..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/login.old +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). 
-# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. 
-# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/radius_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/radius_nss.conf deleted file mode 100644 index 1567b6e645..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/radius_nss.conf +++ /dev/null @@ -1,55 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2 -# RADIUS NSS Configuration File -# -# Debug: on|off|trace -# Default: off -# -# debug=on - -# -# User Privilege: -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell - -# Eg: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell -# - -# many_to_one: -# y: Map RADIUS users to one local user per privilege. -# n: Create local user account on first successful authentication. -# Default: n -# - -# Eg: -# many_to_one=y -# - -# unconfirmed_disallow: -# y: Do not allow unconfirmed users (users created before authentication) -# n: Allow unconfirmed users. -# Default: n - -# Eg: -# unconfirmed_disallow=y -# - -# unconfirmed_ageout: -# : Wait time before purging unconfirmed users -# Default: 600 -# - -# Eg: -# unconfirmed_ageout=900 -# - -# unconfirmed_regexp: -# : The RE to match the command line of processes for which the -# creation of unconfirmed users are to be allowed. -# Default: (.*: \[priv\])|(.*: \[accepted\]) -# where: is the unconfirmed user. 
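[Editor's note, illustrative aside — not part of the diff.] Each sample_output directory removed here (TACACS_config_db_local, TACACS_config_db_tacacs, and so on) is a reference copy of the files hostcfgd is expected to render for one AAA scenario, so a unit test can simply diff its generated files against the fixture directory. The snippet below is a generic sketch of that comparison pattern using only the standard library; it is not the project's actual test harness, and the directory paths in the usage example are placeholders.

#!/usr/bin/env python3
"""Sketch: compare a directory of rendered config files against a sample_output fixture directory."""
import difflib
from pathlib import Path

def diff_against_fixture(output_dir, fixture_dir):
    """Return a dict mapping file name -> unified diff text (empty string means identical)."""
    results = {}
    for expected in sorted(Path(fixture_dir).iterdir()):
        actual = Path(output_dir) / expected.name
        got = actual.read_text().splitlines(keepends=True) if actual.exists() else []
        want = expected.read_text().splitlines(keepends=True)
        results[expected.name] = ''.join(
            difflib.unified_diff(want, got, fromfile=str(expected), tofile=str(actual)))
    return results

if __name__ == '__main__':
    # Placeholder paths; point them at a real rendered-output and fixture directory.
    report = diff_against_fixture('tests/hostcfgd/output',
                                  'tests/hostcfgd/sample_output/TACACS_config_db_local')
    for name, delta in report.items():
        print('OK  ' if not delta else 'DIFF', name)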
-# \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/sshd b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/sshd deleted file mode 100644 index c025af353d..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/sshd +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth-sonic - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/sshd.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/sshd.old deleted file mode 100644 index d70b384bd9..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/sshd.old +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/tacplus_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/tacplus_nss.conf deleted file mode 100644 index d24cab57d1..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local/tacplus_nss.conf +++ /dev/null @@ -1,42 +0,0 @@ -# Configuration for libnss-tacplus - -# debug - If you want to open debug log, set it on -# Default: off -# debug=on - -# local_accounting - If you want to local accounting, set it -# Default: None -# local_accounting -local_accounting - -# tacacs_accounting - If you want to tacacs+ accounting, set it -# Default: None -# tacacs_accounting - -# local_authorization - If you want to local authorization, set it -# Default: None -# local_authorization -local_authorization - -# tacacs_authorization - If you want to tacacs+ authorization, set it -# Default: None -# tacacs_authorization - -# src_ip - set source address of TACACS+ protocol packets -# Default: None (auto source ip address) -# src_ip=2.2.2.2 - -# server - set ip address, tcp port, secret string and timeout for TACACS+ servers -# Default: None (no TACACS+ server) -# server=1.1.1.1:49,secret=test,timeout=3 -server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default -server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt - -# user_priv - set the map between TACACS+ user privilege and local user's passwd -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash - -# many_to_one - create one local user for many TACACS+ users which has the same privilege -# Default: many_to_one=n -# many_to_one=y diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/common-auth-sonic b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/common-auth-sonic deleted file mode 100644 index 87af4cc5c6..0000000000 --- 
a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/common-auth-sonic +++ /dev/null @@ -1,21 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-auth- authentication settings common to all services -# This file is included from other service-specific PAM config files, -# and should contain a list of the authentication modules that define -# the central authentication scheme for use on the system -# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the -# traditional Unix authentication mechanisms. -# -# here are the per-package modules (the "Primary" block) - -auth [success=1 default=ignore] pam_unix.so nullok try_first_pass - -# -# here's the fallback if no module succeeds -auth requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -auth required pam_permit.so -# and here are more per-package modules (the "Additional" block) diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/login b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/login deleted file mode 100644 index 80ba645281..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/login +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth-sonic - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. -# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/login.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/login.old deleted file mode 100644 index 07ff95407c..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/login.old +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). 
-# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. 
-# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/radius_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/radius_nss.conf deleted file mode 100644 index 1567b6e645..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/radius_nss.conf +++ /dev/null @@ -1,55 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2 -# RADIUS NSS Configuration File -# -# Debug: on|off|trace -# Default: off -# -# debug=on - -# -# User Privilege: -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell - -# Eg: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell -# - -# many_to_one: -# y: Map RADIUS users to one local user per privilege. -# n: Create local user account on first successful authentication. -# Default: n -# - -# Eg: -# many_to_one=y -# - -# unconfirmed_disallow: -# y: Do not allow unconfirmed users (users created before authentication) -# n: Allow unconfirmed users. -# Default: n - -# Eg: -# unconfirmed_disallow=y -# - -# unconfirmed_ageout: -# : Wait time before purging unconfirmed users -# Default: 600 -# - -# Eg: -# unconfirmed_ageout=900 -# - -# unconfirmed_regexp: -# : The RE to match the command line of processes for which the -# creation of unconfirmed users are to be allowed. -# Default: (.*: \[priv\])|(.*: \[accepted\]) -# where: is the unconfirmed user. 
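[Editor's note, illustrative aside — not part of the diff.] The tacplus_nss.conf fixtures in this diff encode each TACACS+ server on one line, e.g. server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default. As a reading aid, here is a small parser for that colon/comma layout; it is illustrative only, assumes port 49 when none is given (the default shown in the fixture's commented example), and is not the libnss-tacplus implementation.

#!/usr/bin/env python3
"""Illustrative only: split tacplus_nss.conf 'server=' lines into their fields."""

def parse_tacplus_server(line):
    # Expected shape: server=<ip>:<port>,secret=<secret>,timeout=<seconds>[,vrf=<vrf>]
    value = line.strip().split('=', 1)[1]
    endpoint, *options = value.split(',')
    host, _, port = endpoint.partition(':')
    server = {'host': host, 'port': int(port) if port else 49}
    for opt in options:
        name, _, val = opt.partition('=')
        server[name] = int(val) if name == 'timeout' else val
    return server

if __name__ == '__main__':
    for line in ('server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default',
                 'server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt'):
        print(parse_tacplus_server(line))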
-# \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/sshd b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/sshd deleted file mode 100644 index c025af353d..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/sshd +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth-sonic - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/sshd.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/sshd.old deleted file mode 100644 index d70b384bd9..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/sshd.old +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/tacplus_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/tacplus_nss.conf deleted file mode 100644 index 431cbcec0a..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_local_and_tacacs/tacplus_nss.conf +++ /dev/null @@ -1,44 +0,0 @@ -# Configuration for libnss-tacplus - -# debug - If you want to open debug log, set it on -# Default: off -# debug=on - -# local_accounting - If you want to local accounting, set it -# Default: None -# local_accounting -local_accounting - -# tacacs_accounting - If you want to tacacs+ accounting, set it -# Default: None -# tacacs_accounting -tacacs_accounting - -# local_authorization - If you want to local authorization, set it -# Default: None -# local_authorization -local_authorization - -# tacacs_authorization - If you want to tacacs+ authorization, set it -# Default: None -# tacacs_authorization -tacacs_authorization - -# src_ip - set source address of TACACS+ protocol packets -# Default: None (auto source ip address) -# src_ip=2.2.2.2 - -# server - set ip address, tcp port, secret string and timeout for TACACS+ servers -# Default: None (no TACACS+ server) -# server=1.1.1.1:49,secret=test,timeout=3 -server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default -server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt - -# user_priv - set the map between TACACS+ user privilege and local user's passwd -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash - -# many_to_one - create one local user for many TACACS+ users which has the same privilege -# Default: many_to_one=n -# many_to_one=y diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/common-auth-sonic b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/common-auth-sonic deleted file mode 
100644 index 87af4cc5c6..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/common-auth-sonic +++ /dev/null @@ -1,21 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# -# /etc/pam.d/common-auth- authentication settings common to all services -# This file is included from other service-specific PAM config files, -# and should contain a list of the authentication modules that define -# the central authentication scheme for use on the system -# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the -# traditional Unix authentication mechanisms. -# -# here are the per-package modules (the "Primary" block) - -auth [success=1 default=ignore] pam_unix.so nullok try_first_pass - -# -# here's the fallback if no module succeeds -auth requisite pam_deny.so -# prime the stack with a positive return value if there isn't one already; -# this avoids us returning an error just because nothing sets a success code -# since the modules above will each just jump around -auth required pam_permit.so -# and here are more per-package modules (the "Additional" block) diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/login b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/login deleted file mode 100644 index 80ba645281..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/login +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). -# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth-sonic - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. -# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/login.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/login.old deleted file mode 100644 index 07ff95407c..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/login.old +++ /dev/null @@ -1,116 +0,0 @@ -# -# The PAM configuration file for the Shadow `login' service -# - -# Enforce a minimal delay in case of failure (in microseconds). 
-# (Replaces the `FAIL_DELAY' setting from login.defs) -# Note that other modules may require another minimal delay. (for example, -# to disable any delay, you should add the nodelay option to pam_unix) -auth optional pam_faildelay.so delay=3000000 - -# Outputs an issue file prior to each login prompt (Replaces the -# ISSUE_FILE option from login.defs). Uncomment for use -# auth required pam_issue.so issue=/etc/issue - -# Disallows root logins except on tty's listed in /etc/securetty -# (Replaces the `CONSOLE' setting from login.defs) -# -# With the default control of this module: -# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] -# root will not be prompted for a password on insecure lines. -# if an invalid username is entered, a password is prompted (but login -# will eventually be rejected) -# -# You can change it to a "requisite" module if you think root may mis-type -# her login and should not be prompted for a password in that case. But -# this will leave the system as vulnerable to user enumeration attacks. -# -# You can change it to a "required" module if you think it permits to -# guess valid user names of your system (invalid user names are considered -# as possibly being root on insecure lines), but root passwords may be -# communicated over insecure lines. -auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so - -# Disallows other than root logins when /etc/nologin exists -# (Replaces the `NOLOGINS_FILE' option from login.defs) -auth requisite pam_nologin.so - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible -# that a module could execute code in the wrong domain. -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Sets the loginuid process attribute -session required pam_loginuid.so - -# SELinux needs to intervene at login time to ensure that the process -# starts in the proper default security context. Only sessions which are -# intended to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open -# When the module is present, "required" would be sufficient (When SELinux -# is disabled, this returns success.) - -# This module parses environment configuration file(s) -# and also allows you to use an extended config -# file /etc/security/pam_env.conf. -# -# parsing /etc/environment needs "readenv=1" -session required pam_env.so readenv=1 -# locale variables are also kept into /etc/default/locale in etch -# reading this file *in addition to /etc/environment* does not hurt -session required pam_env.so readenv=1 envfile=/etc/default/locale - -# Standard Un*x authentication. -@include common-auth - -# This allows certain extra groups to be granted to a user -# based on things like time of day, tty, service, and user. -# Please edit /etc/security/group.conf to fit your needs -# (Replaces the `CONSOLE_GROUPS' option in login.defs) -auth optional pam_group.so - -# Uncomment and edit /etc/security/time.conf if you need to set -# time restraint on logins. -# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs -# as well as /etc/porttime) -# account requisite pam_time.so - -# Uncomment and edit /etc/security/access.conf if you need to -# set access limits. 
-# (Replaces /etc/login.access file) -# account required pam_access.so - -# Sets up user limits according to /etc/security/limits.conf -# (Replaces the use of /etc/limits in old login) -session required pam_limits.so - -# Prints the last login info upon successful login -# (Replaces the `LASTLOG_ENAB' option from login.defs) -session optional pam_lastlog.so - -# Prints the message of the day upon successful login. -# (Replaces the `MOTD_FILE' option in login.defs) -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Prints the status of the user's mailbox upon successful login -# (Replaces the `MAIL_CHECK_ENAB' option from login.defs). -# -# This also defines the MAIL environment variable -# However, userdel also needs MAIL_DIR and MAIL_FILE variables -# in /etc/login.defs to make sure that removing a user -# also removes the user's mail spool file. -# See comments in /etc/login.defs -session optional pam_mail.so standard - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x account and session -@include common-account -@include common-session -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/radius_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/radius_nss.conf deleted file mode 100644 index 1567b6e645..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/radius_nss.conf +++ /dev/null @@ -1,55 +0,0 @@ -#THIS IS AN AUTO-GENERATED FILE -# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2 -# RADIUS NSS Configuration File -# -# Debug: on|off|trace -# Default: off -# -# debug=on - -# -# User Privilege: -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell - -# Eg: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell -# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell -# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell -# - -# many_to_one: -# y: Map RADIUS users to one local user per privilege. -# n: Create local user account on first successful authentication. -# Default: n -# - -# Eg: -# many_to_one=y -# - -# unconfirmed_disallow: -# y: Do not allow unconfirmed users (users created before authentication) -# n: Allow unconfirmed users. -# Default: n - -# Eg: -# unconfirmed_disallow=y -# - -# unconfirmed_ageout: -# : Wait time before purging unconfirmed users -# Default: 600 -# - -# Eg: -# unconfirmed_ageout=900 -# - -# unconfirmed_regexp: -# : The RE to match the command line of processes for which the -# creation of unconfirmed users are to be allowed. -# Default: (.*: \[priv\])|(.*: \[accepted\]) -# where: is the unconfirmed user. 
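[Editor's note, illustrative aside — not part of the diff.] In every scenario above, the current PAM fixtures (login, sshd) pull in the generated common-auth-sonic stack, while their *.old counterparts still include the stock common-auth. The helper below is a generic way to report which variant a PAM service file carries; the names are invented for illustration and this is not part of hostcfgd or its tests.

#!/usr/bin/env python3
"""Sketch: report which common-auth include a PAM service file carries."""
import re
import sys

INCLUDE_RE = re.compile(r'^@include\s+(common-auth\S*)', re.MULTILINE)

def auth_include(pam_text):
    """Return 'common-auth-sonic', 'common-auth', or None for the given PAM file text."""
    match = INCLUDE_RE.search(pam_text)
    return match.group(1) if match else None

if __name__ == '__main__':
    # Pass PAM file paths on the command line, e.g. a fixture's sshd and sshd.old.
    for path in sys.argv[1:]:
        with open(path) as handle:
            print(path, '->', auth_include(handle.read()))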
-# \ No newline at end of file diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/sshd b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/sshd deleted file mode 100644 index c025af353d..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/sshd +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth-sonic - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/sshd.old b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/sshd.old deleted file mode 100644 index d70b384bd9..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/sshd.old +++ /dev/null @@ -1,55 +0,0 @@ -# PAM configuration for the Secure Shell service - -# Standard Un*x authentication. -@include common-auth - -# Disallow non-root logins when /etc/nologin exists. -account required pam_nologin.so - -# Uncomment and edit /etc/security/access.conf if you need to set complex -# access limits that are hard to express in sshd_config. -# account required pam_access.so - -# Standard Un*x authorization. -@include common-account - -# SELinux needs to be the first session rule. This ensures that any -# lingering context has been cleared. Without this it is possible that a -# module could execute code in the wrong domain. 
-session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close - -# Set the loginuid process attribute. -session required pam_loginuid.so - -# Create a new session keyring. -session optional pam_keyinit.so force revoke - -# Standard Un*x session setup and teardown. -@include common-session - -# Print the message of the day upon successful login. -# This includes a dynamically generated part from /run/motd.dynamic -# and a static (admin-editable) part from /etc/motd. -session optional pam_motd.so motd=/run/motd.dynamic -session optional pam_motd.so noupdate - -# Print the status of the user's mailbox upon successful login. -session optional pam_mail.so standard noenv # [1] - -# Set up user limits from /etc/security/limits.conf. -session required pam_limits.so - -# Read environment variables from /etc/environment and -# /etc/security/pam_env.conf. -session required pam_env.so # [1] -# In Debian 4.0 (etch), locale-related environment variables were moved to -# /etc/default/locale, so read that as well. -session required pam_env.so user_readenv=1 envfile=/etc/default/locale - -# SELinux needs to intervene at login time to ensure that the process starts -# in the proper default security context. Only sessions which are intended -# to run in the user's context should be run after this. -session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open - -# Standard Un*x password updating. -@include common-password diff --git a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/tacplus_nss.conf b/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/tacplus_nss.conf deleted file mode 100644 index c9b8ab2944..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/sample_output/TACACS_config_db_tacacs/tacplus_nss.conf +++ /dev/null @@ -1,42 +0,0 @@ -# Configuration for libnss-tacplus - -# debug - If you want to open debug log, set it on -# Default: off -# debug=on - -# local_accounting - If you want to local accounting, set it -# Default: None -# local_accounting - -# tacacs_accounting - If you want to tacacs+ accounting, set it -# Default: None -# tacacs_accounting -tacacs_accounting - -# local_authorization - If you want to local authorization, set it -# Default: None -# local_authorization - -# tacacs_authorization - If you want to tacacs+ authorization, set it -# Default: None -# tacacs_authorization -tacacs_authorization - -# src_ip - set source address of TACACS+ protocol packets -# Default: None (auto source ip address) -# src_ip=2.2.2.2 - -# server - set ip address, tcp port, secret string and timeout for TACACS+ servers -# Default: None (no TACACS+ server) -# server=1.1.1.1:49,secret=test,timeout=3 -server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default -server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt - -# user_priv - set the map between TACACS+ user privilege and local user's passwd -# Default: -# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash -# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash - -# many_to_one - create one local user for many TACACS+ users which has the same privilege -# Default: many_to_one=n -# many_to_one=y diff --git a/src/sonic-host-services/tests/hostcfgd/test_passwh_vectors.py b/src/sonic-host-services/tests/hostcfgd/test_passwh_vectors.py deleted file mode 100644 index acf1c76711..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/test_passwh_vectors.py +++ /dev/null @@ -1,244 +0,0 @@ -""" 
- hostcfgd test password hardening vector -""" -HOSTCFGD_TEST_PASSWH_VECTOR = [ - [ - "PASSWORD_HARDENING", - { - "default_values":{ - "PASSW_HARDENING": { - "POLICIES":{ - "state": "disabled", - "expiration": "180", - "expiration_warning": "15", - "history_cnt": "10", - "len_min": "8", - "reject_user_passw_match": "True", - "lower_class": "True", - "upper_class": "True", - "digits_class": "True", - "special_class": "True" - } - }, - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - } - }, - "enable_feature":{ - "PASSW_HARDENING": { - "POLICIES":{ - "state": "enabled", - "expiration": "180", - "expiration_warning": "15", - "history_cnt": "10", - "len_min": "8", - "reject_user_passw_match": "True", - "lower_class": "True", - "upper_class": "True", - "digits_class": "True", - "special_class": "True" - } - }, - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - } - }, - "enable_digits_class":{ - "PASSW_HARDENING": { - "POLICIES":{ - "state": "enabled", - "expiration": "0", - "expiration_warning": "0", - "history_cnt": "0", - "len_min": "8", - "reject_user_passw_match": "False", - "lower_class": "False", - "upper_class": "False", - "digits_class": "True", - "special_class": "False" - } - }, - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - } - }, - "enable_lower_class":{ - "PASSW_HARDENING": { - "POLICIES":{ - "state": "enabled", - "expiration": "0", - "expiration_warning": "0", - "history_cnt": "0", - "len_min": "8", - "reject_user_passw_match": "False", - "lower_class": "True", - "upper_class": "False", - "digits_class": "False", - "special_class": "False" - } - }, - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - }, - "enable_upper_class":{ - "PASSW_HARDENING": { - "POLICIES":{ - "state": "enabled", - "expiration": "0", - "expiration_warning": "0", - "history_cnt": "0", - "len_min": "8", - "reject_user_passw_match": "False", - "lower_class": "False", - "upper_class": "True", - "digits_class": "False", - "special_class": "False" - } - }, - "DEVICE_METADATA": { - 
"localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - } - }, - "enable_special_class":{ - "PASSW_HARDENING": { - "POLICIES":{ - "state": "enabled", - "expiration": "0", - "expiration_warning": "0", - "history_cnt": "0", - "len_min": "8", - "reject_user_passw_match": "False", - "lower_class": "False", - "upper_class": "False", - "digits_class": "False", - "special_class": "True" - } - }, - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - } - } - } - ] -] diff --git a/src/sonic-host-services/tests/hostcfgd/test_radius_vectors.py b/src/sonic-host-services/tests/hostcfgd/test_radius_vectors.py deleted file mode 100644 index df10499e1a..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/test_radius_vectors.py +++ /dev/null @@ -1,181 +0,0 @@ -from unittest.mock import call - -""" - hostcfgd test radius vector -""" -HOSTCFGD_TEST_RADIUS_VECTOR = [ - [ - "RADIUS", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "AAA": { - "authentication": { - "login": "radius,local", - "debug": "True", - } - }, - "RADIUS": { - "global": { - "nas_ip": "10.10.10.10", - "auth_port": "1645", - "auth_type": "mschapv2", - "retransmit": "2", - "timeout": "3", - "passkey": "pass", - } - }, - "RADIUS_SERVER": { - "10.10.10.1": { - "auth_type": "pap", - "retransmit": "1", - "timeout": "1", - "passkey": "pass1", - }, - "10.10.10.2": { - "auth_type": "chap", - "retransmit": "2", - "timeout": "2", - "passkey": "pass2", - } - }, - }, - "expected_config_db": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "AAA": { - "authentication": { - "login": "radius,local", - "debug": "True", - } - }, - "RADIUS": { - "global": { - "nas_ip": "10.10.10.10", - "auth_port": "1645", - "auth_type": "mschapv2", - "retransmit": "2", - "timeout": "3", - "passkey": "pass", - } - }, - "RADIUS_SERVER": { - "10.10.10.1": { - "auth_type": "pap", - "retransmit": "1", - "timeout": "1", - "passkey": "pass1", - }, - "10.10.10.2": { - "auth_type": "chap", - "retransmit": "2", - "timeout": "2", - "passkey": "pass2", - } - }, - }, - "expected_subprocess_calls": [ - call("service aaastatsd start", shell=True), - ], - } - ], - [ - "LOCAL", - { - 
"config_db": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "local", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "AAA": { - "authentication": { - "login": "local", - "debug": "True", - } - }, - }, - "expected_config_db": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "local", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "AAA": { - "authentication": { - "login": "local", - "debug": "True", - } - }, - }, - "expected_subprocess_calls": [ - call("service aaastatsd start", shell=True), - ], - }, - ], -] diff --git a/src/sonic-host-services/tests/hostcfgd/test_tacacs_vectors.py b/src/sonic-host-services/tests/hostcfgd/test_tacacs_vectors.py deleted file mode 100644 index 38d0012fa8..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/test_tacacs_vectors.py +++ /dev/null @@ -1,260 +0,0 @@ -from unittest.mock import call - -""" - hostcfgd test tacacs vector -""" -HOSTCFGD_TEST_TACACS_VECTOR = [ - [ - "TACACS", - { - "config_db_local": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "AAA": { - "authentication": { - "login": "local" - }, - "authorization": { - "login": "local" - }, - "accounting": { - "login": "local" - } - }, - "TACPLUS": { - "global": { - "auth_type": "chap", - "timeout": 5, - "passkey": "dellsonic", - "src_intf": "Ethernet0" - } - }, - "TACPLUS_SERVER": { - "192.168.1.1" : { - "priority": 5, - "tcp_port": 50, - "timeout": 10, - "auth_type": "chap", - "passkey": "dellsonic", - "vrf": "default" - }, - "192.168.1.2" : { - "priority": 2, - "tcp_port": 51, - "timeout": 15, - "auth_type": "pap", - "passkey": "dellsonic1", - "vrf": "mgmt" - } - }, - }, - "config_db_tacacs": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "AAA": { - "authentication": { - "login": "local" - }, - "authorization": { - "login": "tacacs+" - }, - "accounting": { - "login": "tacacs+" - } - }, - "TACPLUS": { - "global": { - "auth_type": "chap", - "timeout": 5, - "passkey": "dellsonic", - "src_intf": "Ethernet0" - } - }, - "TACPLUS_SERVER": { - "192.168.1.1" : { - "priority": 5, - "tcp_port": 50, - "timeout": 10, - "auth_type": "chap", - "passkey": "dellsonic", - "vrf": "default" - }, - "192.168.1.2" : { - "priority": 2, - "tcp_port": 51, - "timeout": 15, - "auth_type": "pap", - 
"passkey": "dellsonic1", - "vrf": "mgmt" - } - }, - }, - "config_db_local_and_tacacs": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "AAA": { - "authentication": { - "login": "local" - }, - "authorization": { - "login": "tacacs+ local" - }, - "accounting": { - "login": "tacacs+ local" - } - }, - "TACPLUS": { - "global": { - "auth_type": "chap", - "timeout": 5, - "passkey": "dellsonic", - "src_intf": "Ethernet0" - } - }, - "TACPLUS_SERVER": { - "192.168.1.1" : { - "priority": 5, - "tcp_port": 50, - "timeout": 10, - "auth_type": "chap", - "passkey": "dellsonic", - "vrf": "default" - }, - "192.168.1.2" : { - "priority": 2, - "tcp_port": 51, - "timeout": 15, - "auth_type": "pap", - "passkey": "dellsonic1", - "vrf": "mgmt" - } - }, - }, - "config_db_disable_accounting": { - "DEVICE_METADATA": { - "localhost": { - "hostname": "radius", - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "AAA": { - "authentication": { - "login": "local" - }, - "authorization": { - "login": "local" - }, - "accounting": { - "login": "disable" - } - }, - "TACPLUS": { - "global": { - "auth_type": "chap", - "timeout": 5, - "passkey": "dellsonic", - "src_intf": "Ethernet0" - } - }, - "TACPLUS_SERVER": { - "192.168.1.1" : { - "priority": 5, - "tcp_port": 50, - "timeout": 10, - "auth_type": "chap", - "passkey": "dellsonic", - "vrf": "default" - }, - "192.168.1.2" : { - "priority": 2, - "tcp_port": 51, - "timeout": 15, - "auth_type": "pap", - "passkey": "dellsonic1", - "vrf": "mgmt" - } - }, - } - } - ] -] diff --git a/src/sonic-host-services/tests/hostcfgd/test_vectors.py b/src/sonic-host-services/tests/hostcfgd/test_vectors.py deleted file mode 100644 index 43754252c0..0000000000 --- a/src/sonic-host-services/tests/hostcfgd/test_vectors.py +++ /dev/null @@ -1,567 +0,0 @@ -from unittest.mock import call - -""" - hostcfgd test vector -""" -HOSTCFGD_TEST_VECTOR = [ - [ - "DualTorCase", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "{% if 'subtype' in 
DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "expected_config_db": { - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "enabled" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "enable_feature_subprocess_calls": [ - call("sudo systemctl unmask dhcp_relay.service", shell=True), - call("sudo systemctl enable dhcp_relay.service", shell=True), - call("sudo systemctl start dhcp_relay.service", shell=True), - call("sudo systemctl unmask mux.service", shell=True), - call("sudo systemctl enable mux.service", shell=True), - call("sudo systemctl start mux.service", shell=True), - call("sudo systemctl unmask telemetry.service", shell=True), - call("sudo systemctl unmask telemetry.timer", shell=True), - call("sudo systemctl enable telemetry.timer", shell=True), - call("sudo systemctl start telemetry.timer", shell=True), - ], - "daemon_reload_subprocess_call": [ - call("sudo systemctl daemon-reload", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error') - }, - }, - ], - [ - "SingleToRCase", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "type": "ToR", - } - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - "sflow": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "always_enabled" - }, - }, - }, - "expected_config_db": { - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": 
"True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "disabled" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "always_disabled" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - "sflow": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "always_enabled" - }, - }, - }, - "enable_feature_subprocess_calls": [ - call("sudo systemctl stop mux.service", shell=True), - call("sudo systemctl disable mux.service", shell=True), - call("sudo systemctl mask mux.service", shell=True), - call("sudo systemctl unmask telemetry.service", shell=True), - call("sudo systemctl unmask telemetry.timer", shell=True), - call("sudo systemctl enable telemetry.timer", shell=True), - call("sudo systemctl start telemetry.timer", shell=True), - call("sudo systemctl unmask sflow.service", shell=True), - call("sudo systemctl enable sflow.service", shell=True), - call("sudo systemctl start sflow.service", shell=True), - ], - "daemon_reload_subprocess_call": [ - call("sudo systemctl daemon-reload", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error') - }, - }, - ], - [ - "T1Case", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "type": "T1", - } - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "expected_config_db": { - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "disabled" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "always_disabled" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - 
"high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "enable_feature_subprocess_calls": [ - call("sudo systemctl stop mux.service", shell=True), - call("sudo systemctl disable mux.service", shell=True), - call("sudo systemctl mask mux.service", shell=True), - call("sudo systemctl unmask telemetry.service", shell=True), - call("sudo systemctl unmask telemetry.timer", shell=True), - call("sudo systemctl enable telemetry.timer", shell=True), - call("sudo systemctl start telemetry.timer", shell=True), - ], - "daemon_reload_subprocess_call": [ - call("sudo systemctl daemon-reload", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error') - }, - }, - ], - [ - "SingleToRCase_DHCP_Relay_Enabled", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "type": "ToR", - } - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "expected_config_db": { - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "always_disabled" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "enable_feature_subprocess_calls": [ - call("sudo systemctl unmask dhcp_relay.service", shell=True), - call("sudo systemctl enable dhcp_relay.service", shell=True), - call("sudo systemctl start dhcp_relay.service", shell=True), - call("sudo systemctl stop mux.service", shell=True), - call("sudo systemctl disable mux.service", shell=True), - call("sudo systemctl mask mux.service", shell=True), - call("sudo systemctl unmask telemetry.service", shell=True), - call("sudo systemctl unmask telemetry.timer", shell=True), - call("sudo systemctl enable telemetry.timer", shell=True), - call("sudo systemctl start telemetry.timer", shell=True), - ], - "daemon_reload_subprocess_call": [ - call("sudo systemctl daemon-reload", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('output', 'error') - }, - }, - ], - [ - "DualTorCaseWithNoSystemCalls", - { - "config_db": { - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": "ToRRouter", - } - }, - 
"KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "expected_config_db": { - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "enabled" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - }, - "enable_feature_subprocess_calls": [], - "daemon_reload_subprocess_call": [ - call("sudo systemctl daemon-reload", shell=True), - ], - "popen_attributes": { - 'communicate.return_value': ('enabled', 'error') - }, - } - ] -] - -HOSTCFG_DAEMON_CFG_DB = { - "FEATURE": { - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}" - }, - "mux": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "set_owner": "local", - "state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "set_owner": "kube", - "state": "enabled", - "status": "enabled" - }, - }, - "KDUMP": { - "config": { - - } - }, - "NTP": { - "global": { - "vrf": "default", - "src_intf": "eth0;Loopback0" - } - }, - "NTP_SERVER": { - "0.debian.pool.ntp.org": {} - }, - "LOOPBACK_INTERFACE": { - "Loopback0|10.184.8.233/32": { - "scope": "global", - "family": "IPv4" - } - }, - "DEVICE_METADATA": { - "localhost": { - "subtype": "DualToR", - "type": 
"ToRRouter", - } - } -} diff --git a/src/sonic-host-services/tests/mock_connector.py b/src/sonic-host-services/tests/mock_connector.py deleted file mode 100644 index d32017ff84..0000000000 --- a/src/sonic-host-services/tests/mock_connector.py +++ /dev/null @@ -1,24 +0,0 @@ -class MockConnector(object): - STATE_DB = None - data = {} - - def __init__(self, host): - pass - - def connect(self, db_id): - pass - - def get(self, db_id, key, field): - return MockConnector.data[key][field] - - def keys(self, db_id, pattern): - match = pattern.split('*')[0] - ret = [] - for key in MockConnector.data.keys(): - if match in key: - ret.append(key) - - return ret - - def get_all(self, db_id, key): - return MockConnector.data[key] diff --git a/src/sonic-host-services/tests/procdockerstatsd_test.py b/src/sonic-host-services/tests/procdockerstatsd_test.py deleted file mode 100644 index 4db198be7d..0000000000 --- a/src/sonic-host-services/tests/procdockerstatsd_test.py +++ /dev/null @@ -1,43 +0,0 @@ -import sys -import os -import pytest - -from swsscommon import swsscommon -from sonic_py_common.general import load_module_from_source - -from .mock_connector import MockConnector - -swsscommon.SonicV2Connector = MockConnector - -test_path = os.path.dirname(os.path.abspath(__file__)) -modules_path = os.path.dirname(test_path) -scripts_path = os.path.join(modules_path, "scripts") -sys.path.insert(0, modules_path) - -# Load the file under test -procdockerstatsd_path = os.path.join(scripts_path, 'procdockerstatsd') -procdockerstatsd = load_module_from_source('procdockerstatsd', procdockerstatsd_path) - -class TestProcDockerStatsDaemon(object): - def test_convert_to_bytes(self): - test_data = [ - ('1B', 1), - ('500B', 500), - ('1KB', 1000), - ('500KB', 500000), - ('1MB', 1000000), - ('500MB', 500000000), - ('1MiB', 1048576), - ('500MiB', 524288000), - ('66.41MiB', 69635932), - ('333.6MiB', 349804954), - ('1GiB', 1073741824), - ('500GiB', 536870912000), - ('7.751GiB', 8322572878) - ] - - pdstatsd = procdockerstatsd.ProcDockerStats(procdockerstatsd.SYSLOG_IDENTIFIER) - - for test_input, expected_output in test_data: - res = pdstatsd.convert_to_bytes(test_input) - assert res == expected_output