Update submodule for sonic-host-services (#11446)

Why I did it
Enable UT code coverage for the submodule in the sonic-buildimage repo and enable LGTM analysis.

How I did it
Create a separate repo for sonic-host-services under sonic-net, and update the submodule reference in sonic-buildimage.

How to verify it
Build the image.
ganglv 2022-07-14 16:49:17 +08:00 committed by GitHub
parent a86f59eda8
commit cf7a8f8c69
88 changed files with 4 additions and 10410 deletions

.gitmodules (vendored, 3 changes)

@@ -109,3 +109,6 @@
[submodule "src/dhcprelay"]
path = src/dhcprelay
url = https://github.com/sonic-net/sonic-dhcp-relay.git
[submodule "src/sonic-host-services"]
path = src/sonic-host-services
url = https://github.com/sonic-net/sonic-host-services

@@ -0,0 +1 @@
Subproject commit 709046bbec9d05c9bf06e7c54a23ae0f9c970281
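
A minimal verification sketch (illustrative only, not part of this commit), assuming it is run from a sonic-buildimage checkout with git available: it initializes the new submodule and confirms it is pinned to the gitlink commit recorded above.

import subprocess

EXPECTED = "709046bbec9d05c9bf06e7c54a23ae0f9c970281"  # gitlink recorded above

# Fetch and check out the submodule at the commit recorded in the superproject.
subprocess.run(["git", "submodule", "update", "--init", "src/sonic-host-services"], check=True)

# "git submodule status <path>" prints "<sha1> <path> (<describe>)"; a leading
# '+' or '-' would indicate a mismatch or an uninitialized submodule.
status = subprocess.run(
    ["git", "submodule", "status", "src/sonic-host-services"],
    check=True, capture_output=True, text=True,
).stdout.strip()
print(status)
assert EXPECTED in status, "submodule is not pinned to the expected commit"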


@@ -1,22 +0,0 @@
# Compiled Python files
*.pyc
scripts/caclmgrdc
scripts/hostcfgdc
scripts/aaastatsdc
scripts/procdockerstatsdc
# Generated by packaging
*.egg-info/
.eggs/
build/
dist/
# Unit test coverage
.coverage
.pytest_cache/
coverage.xml
htmlcov/
test-results.xml
# Unit test scratchpad
tests/hostcfgd/output/*


@@ -1,34 +0,0 @@
"""Base class for host modules"""
import dbus.service
import dbus
BUS_NAME_BASE = 'org.SONiC.HostService'
BUS_PATH = '/org/SONiC/HostService'
def bus_name(mod_name):
"""Return the bus name for the service"""
return BUS_NAME_BASE + '.' + mod_name
def bus_path(mod_name):
"""Return the bus path for the service"""
return BUS_PATH + '/' + mod_name
method = dbus.service.method
class HostService(dbus.service.Object):
"""Service class for top level DBus endpoint"""
def __init__(self, mod_name):
self.bus = dbus.SystemBus()
self.bus_name = dbus.service.BusName(BUS_NAME_BASE, self.bus)
super(HostService, self).__init__(self.bus_name, BUS_PATH)
class HostModule(dbus.service.Object):
"""Base class for all host modules"""
def __init__(self, mod_name):
self.bus = dbus.SystemBus()
self.bus_name = dbus.service.BusName(bus_name(mod_name), self.bus)
super(HostModule, self).__init__(self.bus_name, bus_path(mod_name))
def register():
return HostService, "host_service"
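
For context, a hypothetical module built on the base classes above (illustrative only, not part of the removed sources): it subclasses HostModule, claims the bus name org.SONiC.HostService.echo, and follows the same register() convention the real modules use.

import host_service

MOD_NAME = 'echo'

class Echo(host_service.HostModule):
    """DBus endpoint that echoes its argument back to the caller"""

    @host_service.method(host_service.bus_name(MOD_NAME), in_signature='s', out_signature='is')
    def echo(self, text):
        # Return an (exit code, string) pair, the convention used by the
        # real modules such as showtech below.
        return 0, text

def register():
    """Return the class and module name for the service loader"""
    return Echo, MOD_NAME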


@@ -1,50 +0,0 @@
"""Show techsupport command handler"""
import host_service
import subprocess
import re
MOD_NAME = 'showtech'
class Showtech(host_service.HostModule):
"""DBus endpoint that executes the "show techsupport" command
"""
@host_service.method(host_service.bus_name(MOD_NAME), in_signature='s', out_signature='is')
def info(self, date):
ERROR_TAR_FAILED = 5
ERROR_PROCFS_SAVE_FAILED = 6
ERROR_INVALID_ARGUMENT = 10
err_dict = {ERROR_INVALID_ARGUMENT: 'Invalid input: Incorrect DateTime format',
ERROR_TAR_FAILED: 'Failure saving information into compressed output file',
ERROR_PROCFS_SAVE_FAILED: 'Saving of process information failed'}
cmd = ['/usr/local/bin/generate_dump']
if date:
cmd.append("-s")
cmd.append(date)
try:
result = subprocess.run(cmd, capture_output=True, text=True,
check=True)
except subprocess.CalledProcessError as err:
errmsg = err_dict.get(err.returncode)
if errmsg is None:
output = 'Error: Failure code {:-5}'.format(err.returncode)
else:
output = errmsg
print("%Error: Host side: Failed: " + str(err.returncode))
return err.returncode, output
output_file_match = re.search(r'/var/.*dump.*\.gz', result.stdout)
output_filename = output_file_match.group()
return result.returncode, output_filename
def register():
"""Return the class name"""
return Showtech, MOD_NAME
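
An illustrative client for the endpoint above (not part of the removed sources), assuming the host service is running, dbus-python is installed, and D-Bus policy permits the call; the bus name, object path and interface follow bus_name()/bus_path() in host_service.py.

import dbus

bus = dbus.SystemBus()
obj = bus.get_object('org.SONiC.HostService.showtech',
                     '/org/SONiC/HostService/showtech')
iface = dbus.Interface(obj, dbus_interface='org.SONiC.HostService.showtech')

# info() takes a date string ('' to omit the -s option) and returns (rc, output);
# on success, output is the path of the generated dump archive.
rc, output = iface.info('')
print(rc, output)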


@@ -1,2 +0,0 @@
[pytest]
addopts = --cov=scripts --cov-report html --cov-report term --cov-report xml --ignore=tests/*/test*_vectors.py


@@ -1,222 +0,0 @@
#!/usr/bin/env python3
#
import os
import syslog
import threading
from swsscommon.swsscommon import ConfigDBConnector
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# FILE
RADIUS_PAM_AUTH_CONF_DIR = "/etc/pam_radius_auth.d/"
RADIUS_PAM_AUTH_CONF_STATS_DIR = "/etc/pam_radius_auth.d/statistics/"
class RadiusCountersDbMon (threading.Thread):
def __init__(self, ID, name, radiusStatsInstance):
threading.Thread.__init__(self)
self.ID = ID
self.name = name
self.radiusStatsInstance = radiusStatsInstance
def handle_CountersDbRadiusClear(self, key, data):
# print("RadiusCountersDbMon.handle_CountersDbRadiusClear()")
if key == 'clear':
self.radiusStatsInstance.handle_clear()
def run(self):
# print("RadiusCountersDbMon.run()")
self.radiusStatsInstance.counters_db.subscribe('RADIUS', lambda table, key, data: self.handle_CountersDbRadiusClear(key, data))
self.radiusStatsInstance.counters_db.listen()
# print("RadiusCountersDbMon.run(): After listen()")
class RadiusStatsFileHandler(FileSystemEventHandler):
def __init__(self, radiusStatsInstance):
self.radiusStatsInstance = radiusStatsInstance
def on_any_event(self, event):
# print("RadiusStatsFileHandler.on_any_event()")
if event.is_directory:
return None
self.radiusStatsInstance.handle_update(os.path.basename(event.src_path))
class RadiusStatsFileMon ():
def __init__(self, radiusStatsInstance):
self.event_handler = RadiusStatsFileHandler(radiusStatsInstance)
self.observer = Observer()
self.observer.schedule(self.event_handler, RADIUS_PAM_AUTH_CONF_STATS_DIR, recursive=False)
self.observer.start()
# print("RadiusStatsFileMon.__init__(): After observer.start()")
def stop(self):
# print("RadiusStatsFileMon.stop()")
self.observer.stop()
# print("RadiusStatsFileMon.stop(): After observer.stop()")
self.observer.join()
# print("RadiusStatsFileMon.stop(): After observer.join()")
class RadiusStatistics:
def __init__(self, cfg_db, rad_global_conf, radius_conf):
self.radius_counter_names = [
"counter_0",
"access_requests",
"access_accepts",
"access_rejects",
"accounting_requests",
"accounting_responses",
"counter_6",
"counter_7",
"counter_8",
"counter_9",
"counter_10",
"access_challenges",
"counter_12",
"counter_13",
"counter_14",
"counter_15",
"counter_16",
"retried_access_requests",
"counter_18",
"counter_19",
"retried_accounting_requests",
"counter_21",
"counter_22",
"counter_23",
"counter_24",
"counter_25",
"counter_26",
"retried_access_challenges",
"counter_28",
"counter_29",
"counter_30",
"counter_31",
"timeouts",
"bad_authenticators",
"invalid_packets",
"counter_35",
]
self.radius_global = {
'statistics': 'False'
}
self.radius_servers = {}
self.config_db = cfg_db
for row in rad_global_conf:
self.radius_global_update(row, rad_global_conf[row])
for row in radius_conf:
self.radius_server_update(row, radius_conf[row])
self.counters_db = ConfigDBConnector()
self.counters_db.db_connect('COUNTERS_DB', wait_for_init=False,
retry_on=True)
syslog.syslog(syslog.LOG_INFO, 'CountersDB connect success')
self.dbmon_thread = RadiusCountersDbMon("RadiusCountersDbMon",
"RadiusCountersDbMon", self)
self.dbmon_thread.daemon = True
self.dbmon_thread.start()
self.filemon = RadiusStatsFileMon(self)
syslog.syslog(syslog.LOG_INFO, 'RADIUS Stats File Monitor started')
def radius_global_update(self, key, data):
if key == 'global':
self.radius_global.update(data)
for addr in self.radius_servers:
self.create_file(addr)
def radius_server_update(self, key, data):
if data == {}:
if key in self.radius_servers:
del self.radius_servers[key]
else:
self.radius_servers[key] = data
self.create_file(key)
def create_file(self, addr):
# print( "RadiusStatistics.create_file({})".format(addr))
stats_file = RADIUS_PAM_AUTH_CONF_STATS_DIR + addr
if self.radius_global['statistics'] == 'False':
if os.path.exists(stats_file):
os.unlink(stats_file)
else:
open(stats_file, 'a').close()
os.chmod(stats_file, 0o666)
self.handle_update(addr)
def handle_clear(self):
# print( "RadiusStatistics.handle_clear()")
for filename in os.listdir(RADIUS_PAM_AUTH_CONF_STATS_DIR):
stats_file = RADIUS_PAM_AUTH_CONF_STATS_DIR + filename
open(stats_file, 'w').close()
def handle_update(self, srv):
# print( "RadiusStatistics.handle_update({})".format(srv))
if self.radius_global['statistics'] == 'False':
return
stats_file = RADIUS_PAM_AUTH_CONF_STATS_DIR + srv
entry = None
if os.path.exists(stats_file):
with open(stats_file, 'r') as f:
lines = f.readlines()
if len(lines) > 0:
radius_counters = lines[0].split(' ')
entry = dict(zip(self.radius_counter_names, radius_counters))
counters_db = ConfigDBConnector()
counters_db.db_connect('COUNTERS_DB', wait_for_init=False,
retry_on=False)
counters_db.set_entry('RADIUS_SERVER_STATS', srv, entry)
counters_db.close(counters_db.COUNTERS_DB)
class AAAStatsDaemon:
def __init__(self):
self.config_db = ConfigDBConnector()
self.config_db.connect(wait_for_init=True, retry_on=True)
syslog.syslog(syslog.LOG_INFO, 'ConfigDB connect success')
radius_global = self.config_db.get_table('RADIUS')
radius_server = self.config_db.get_table('RADIUS_SERVER')
self.radiusstats = RadiusStatistics(self.config_db, radius_global,
radius_server)
def radius_global_handler(self, key, data):
self.radiusstats.radius_global_update(key, data)
def radius_server_handler(self, key, data):
self.radiusstats.radius_server_update(key, data)
def start(self):
self.config_db.subscribe('RADIUS_SERVER',
lambda table, key, data: self.radius_server_handler(key, data))
self.config_db.subscribe('RADIUS',
lambda table, key, data: self.radius_global_handler(key, data))
self.config_db.listen()
# print( "After config_db.listen()")
syslog.syslog(syslog.LOG_INFO, 'Stopping FileMon')
self.radiusstats.filemon.stop()
# print( "Exiting")
syslog.syslog(syslog.LOG_INFO, 'Exiting')
def main():
daemon = AAAStatsDaemon()
daemon.start()
if __name__ == "__main__":
main()
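
A small standalone sketch of the counters-file parsing done in handle_update() above (illustrative only; the sample line is made up). Each per-server file under /etc/pam_radius_auth.d/statistics/ holds one line of space-separated counters, which is zipped against radius_counter_names and written to COUNTERS_DB under RADIUS_SERVER_STATS|<server>.

# Only the first few counter names are shown here; the daemon uses the
# full 36-entry radius_counter_names list above.
RADIUS_COUNTER_NAMES = [
    "counter_0", "access_requests", "access_accepts", "access_rejects",
    "accounting_requests", "accounting_responses",
]

sample_line = "0 120 118 2 30 30"          # hypothetical stats file contents
radius_counters = sample_line.split(' ')
entry = dict(zip(RADIUS_COUNTER_NAMES, radius_counters))
print(entry)
# {'counter_0': '0', 'access_requests': '120', 'access_accepts': '118', ...}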


@@ -1,900 +0,0 @@
#!/usr/bin/env python3
#
# caclmgrd
#
# Control plane ACL manager daemon for SONiC
#
# Upon starting, this daemon reads control plane ACL tables and rules from
# Config DB, converts the rules into iptables rules and installs the iptables
# rules. The daemon then indefinitely listens for notifications from Config DB
# and updates iptables rules if control plane ACL configuration has changed.
#
try:
import ipaddress
import os
import subprocess
import sys
import threading
import time
from sonic_py_common import daemon_base, device_info, multi_asic
from swsscommon import swsscommon
except ImportError as err:
raise ImportError("%s - required module not found" % str(err))
VERSION = "1.0"
SYSLOG_IDENTIFIER = "caclmgrd"
DEFAULT_NAMESPACE = ''
# ========================== Helper Functions =========================
def _ip_prefix_in_key(key):
"""
Function to check if IP prefix is present in a Redis database key.
If it is present, then the key will be a tuple. Otherwise, the
key will be a string.
"""
return (isinstance(key, tuple))
# ============================== Classes ==============================
class ControlPlaneAclManager(daemon_base.DaemonBase):
"""
Class which reads control plane ACL tables and rules from Config DB,
translates them into equivalent iptables commands and runs those
commands in order to apply the control plane ACLs.
Attributes:
config_db: Handle to Config Redis database via SwSS SDK
"""
FEATURE_TABLE = "FEATURE"
ACL_TABLE = "ACL_TABLE"
ACL_RULE = "ACL_RULE"
DEVICE_METADATA_TABLE = "DEVICE_METADATA"
MUX_CABLE_TABLE = "MUX_CABLE_TABLE"
ACL_TABLE_TYPE_CTRLPLANE = "CTRLPLANE"
BFD_SESSION_TABLE = "BFD_SESSION_TABLE"
# To specify a port range instead of a single port, use iptables format:
# separate start and end ports with a colon, e.g., "1000:2000"
ACL_SERVICES = {
"NTP": {
"ip_protocols": ["udp"],
"dst_ports": ["123"],
"multi_asic_ns_to_host_fwd":False
},
"SNMP": {
"ip_protocols": ["tcp", "udp"],
"dst_ports": ["161"],
"multi_asic_ns_to_host_fwd":True
},
"SSH": {
"ip_protocols": ["tcp"],
"dst_ports": ["22"],
"multi_asic_ns_to_host_fwd":True
},
"ANY": {
"ip_protocols": ["any"],
"dst_ports": ["0"],
"multi_asic_ns_to_host_fwd":False
}
}
UPDATE_DELAY_SECS = 0.5
DualToR = False
bfdAllowed = False
def __init__(self, log_identifier):
super(ControlPlaneAclManager, self).__init__(log_identifier)
# Update-thread-specific data per namespace
self.update_thread = {}
self.lock = {}
self.num_changes = {}
# Initialize update-thread-specific data for default namespace
self.update_thread[DEFAULT_NAMESPACE] = None
self.lock[DEFAULT_NAMESPACE] = threading.Lock()
self.num_changes[DEFAULT_NAMESPACE] = 0
if device_info.is_multi_npu():
swsscommon.SonicDBConfig.load_sonic_global_db_config()
self.config_db_map = {}
self.iptables_cmd_ns_prefix = {}
self.config_db_map[DEFAULT_NAMESPACE] = swsscommon.ConfigDBConnector(use_unix_socket_path=True, namespace=DEFAULT_NAMESPACE)
self.config_db_map[DEFAULT_NAMESPACE].connect()
self.iptables_cmd_ns_prefix[DEFAULT_NAMESPACE] = ""
self.namespace_mgmt_ip = self.get_namespace_mgmt_ip(self.iptables_cmd_ns_prefix[DEFAULT_NAMESPACE], DEFAULT_NAMESPACE)
self.namespace_mgmt_ipv6 = self.get_namespace_mgmt_ipv6(self.iptables_cmd_ns_prefix[DEFAULT_NAMESPACE], DEFAULT_NAMESPACE)
self.namespace_docker_mgmt_ip = {}
self.namespace_docker_mgmt_ipv6 = {}
# Get all features that are present {feature_name : True/False}
self.feature_present = {}
self.update_feature_present()
metadata = self.config_db_map[DEFAULT_NAMESPACE].get_table(self.DEVICE_METADATA_TABLE)
if 'subtype' in metadata['localhost'] and metadata['localhost']['subtype'] == 'DualToR':
self.DualToR = True
namespaces = multi_asic.get_all_namespaces()
for front_asic_namespace in namespaces['front_ns']:
self.update_thread[front_asic_namespace] = None
self.lock[front_asic_namespace] = threading.Lock()
self.num_changes[front_asic_namespace] = 0
self.config_db_map[front_asic_namespace] = swsscommon.ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespace)
self.config_db_map[front_asic_namespace].connect()
self.iptables_cmd_ns_prefix[front_asic_namespace] = "ip netns exec " + front_asic_namespace + " "
self.namespace_docker_mgmt_ip[front_asic_namespace] = self.get_namespace_mgmt_ip(self.iptables_cmd_ns_prefix[front_asic_namespace],
front_asic_namespace)
self.namespace_docker_mgmt_ipv6[front_asic_namespace] = self.get_namespace_mgmt_ipv6(self.iptables_cmd_ns_prefix[front_asic_namespace],
front_asic_namespace)
for back_asic_namespace in namespaces['back_ns']:
self.update_thread[back_asic_namespace] = None
self.lock[back_asic_namespace] = threading.Lock()
self.num_changes[back_asic_namespace] = 0
self.iptables_cmd_ns_prefix[back_asic_namespace] = "ip netns exec " + back_asic_namespace + " "
self.namespace_docker_mgmt_ip[back_asic_namespace] = self.get_namespace_mgmt_ip(self.iptables_cmd_ns_prefix[back_asic_namespace],
back_asic_namespace)
self.namespace_docker_mgmt_ipv6[back_asic_namespace] = self.get_namespace_mgmt_ipv6(self.iptables_cmd_ns_prefix[back_asic_namespace],
back_asic_namespace)
def get_namespace_mgmt_ip(self, iptable_ns_cmd_prefix, namespace):
ip_address_get_command = iptable_ns_cmd_prefix + "ip -4 -o addr show " + ("eth0" if namespace else "docker0") +\
" | awk '{print $4}' | cut -d'/' -f1 | head -1"
return self.run_commands([ip_address_get_command])
def get_namespace_mgmt_ipv6(self, iptable_ns_cmd_prefix, namespace):
ipv6_address_get_command = iptable_ns_cmd_prefix + "ip -6 -o addr show scope global " + ("eth0" if namespace else "docker0") +\
" | awk '{print $4}' | cut -d'/' -f1 | head -1"
return self.run_commands([ipv6_address_get_command])
def run_commands(self, commands):
"""
Given a list of shell commands, run them in order
Args:
commands: List of strings, each string is a shell command
"""
for cmd in commands:
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
self.log_error("Error running command '{}'".format(cmd))
elif stdout:
return stdout.rstrip('\n')
return ""
def parse_int_to_tcp_flags(self, hex_value):
tcp_flags_str = ""
if hex_value & 0x01:
tcp_flags_str += "FIN,"
if hex_value & 0x02:
tcp_flags_str += "SYN,"
if hex_value & 0x04:
tcp_flags_str += "RST,"
if hex_value & 0x08:
tcp_flags_str += "PSH,"
if hex_value & 0x10:
tcp_flags_str += "ACK,"
if hex_value & 0x20:
tcp_flags_str += "URG,"
# iptables doesn't handle the flags below now. It has some special keys for it:
# --ecn-tcp-cwr This matches if the TCP ECN CWR (Congestion Window Received) bit is set.
# --ecn-tcp-ece This matches if the TCP ECN ECE (ECN Echo) bit is set.
# if hex_value & 0x40:
# tcp_flags_str += "ECE,"
# if hex_value & 0x80:
# tcp_flags_str += "CWR,"
# Delete the trailing comma
tcp_flags_str = tcp_flags_str[:-1]
return tcp_flags_str
def update_feature_present(self):
feature_tb_info = self.config_db_map[DEFAULT_NAMESPACE].get_table(self.FEATURE_TABLE)
if feature_tb_info:
for k, v in feature_tb_info.items():
self.feature_present[k] = True
def generate_block_ip2me_traffic_iptables_commands(self, namespace):
INTERFACE_TABLE_NAME_LIST = [
"LOOPBACK_INTERFACE",
"MGMT_INTERFACE",
"VLAN_INTERFACE",
"PORTCHANNEL_INTERFACE",
"INTERFACE"
]
block_ip2me_cmds = []
# Add iptables rules to drop all packets destined for peer-to-peer interface IP addresses
for iface_table_name in INTERFACE_TABLE_NAME_LIST:
iface_table = self.config_db_map[namespace].get_table(iface_table_name)
if iface_table:
for key, _ in iface_table.items():
if not _ip_prefix_in_key(key):
continue
iface_name, iface_cidr = key
ip_ntwrk = ipaddress.ip_network(iface_cidr, strict=False)
# For VLAN interfaces, the IP address we want to block is the default gateway (i.e.,
# the first available host IP address of the VLAN subnet)
ip_addr = next(ip_ntwrk.hosts()) if iface_table_name == "VLAN_INTERFACE" else ip_ntwrk.network_address
if isinstance(ip_ntwrk, ipaddress.IPv4Network):
block_ip2me_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -d {}/{} -j DROP".format(ip_addr, ip_ntwrk.max_prefixlen))
elif isinstance(ip_ntwrk, ipaddress.IPv6Network):
block_ip2me_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -d {}/{} -j DROP".format(ip_addr, ip_ntwrk.max_prefixlen))
else:
self.log_warning("Unrecognized IP address type on interface '{}': {}".format(iface_name, ip_ntwrk))
return block_ip2me_cmds
def generate_allow_internal_docker_ip_traffic_commands(self, namespace):
allow_internal_docker_ip_cmds = []
if namespace:
# For namespace docker allow local communication on docker management ip for all proto
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format
(self.namespace_docker_mgmt_ip[namespace], self.namespace_docker_mgmt_ip[namespace]))
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format
(self.namespace_docker_mgmt_ipv6[namespace], self.namespace_docker_mgmt_ipv6[namespace]))
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format
(self.namespace_mgmt_ip, self.namespace_docker_mgmt_ip[namespace]))
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format
(self.namespace_mgmt_ipv6, self.namespace_docker_mgmt_ipv6[namespace]))
else:
# Also allow host namespace communication on the docker bridge on multi-asic.
if self.namespace_docker_mgmt_ip:
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format
(self.namespace_mgmt_ip, self.namespace_mgmt_ip))
if self.namespace_docker_mgmt_ipv6:
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format
(self.namespace_mgmt_ipv6, self.namespace_mgmt_ipv6))
# In host allow all tcp/udp traffic from namespace docker eth0 management ip to host docker bridge
for docker_mgmt_ip in list(self.namespace_docker_mgmt_ip.values()):
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s {} -d {} -j ACCEPT".format
(docker_mgmt_ip, self.namespace_mgmt_ip))
for docker_mgmt_ipv6 in list(self.namespace_docker_mgmt_ipv6.values()):
allow_internal_docker_ip_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s {} -d {} -j ACCEPT".format
(docker_mgmt_ipv6, self.namespace_mgmt_ipv6))
return allow_internal_docker_ip_cmds
def generate_fwd_traffic_from_namespace_to_host_commands(self, namespace, acl_source_ip_map):
"""
The SNAT and DNAT rules below are added in the asic namespace on multi-ASIC platforms. They help forward requests coming
in through the front panel interfaces created/present in the asic namespace to the service running in the linux host network namespace.
The external IP addresses are NATed to the internal docker IP addresses for the Host service to respond.
"""
if not namespace:
return []
fwd_traffic_from_namespace_to_host_cmds = []
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -t nat -X")
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -t nat -F")
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -t nat -X")
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -t nat -F")
for acl_service in self.ACL_SERVICES:
if self.ACL_SERVICES[acl_service]["multi_asic_ns_to_host_fwd"]:
# Get the Source IP Set if exists else use default source ip prefix
nat_source_ipv4_set = acl_source_ip_map[acl_service]["ipv4"] if acl_source_ip_map and acl_source_ip_map[acl_service]["ipv4"] else { "0.0.0.0/0" }
nat_source_ipv6_set = acl_source_ip_map[acl_service]["ipv6"] if acl_source_ip_map and acl_source_ip_map[acl_service]["ipv6"] else { "::/0" }
for ip_protocol in self.ACL_SERVICES[acl_service]["ip_protocols"]:
for dst_port in self.ACL_SERVICES[acl_service]["dst_ports"]:
for ipv4_src_ip in nat_source_ipv4_set:
# IPv4 rules
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] +
"iptables -t nat -A PREROUTING -p {} -s {} --dport {} -j DNAT --to-destination {}".format
(ip_protocol, ipv4_src_ip, dst_port,
self.namespace_mgmt_ip))
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] +
"iptables -t nat -A POSTROUTING -p {} -s {} --dport {} -j SNAT --to-source {}".format
(ip_protocol, ipv4_src_ip, dst_port,
self.namespace_docker_mgmt_ip[namespace]))
for ipv6_src_ip in nat_source_ipv6_set:
# IPv6 rules
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] +
"ip6tables -t nat -A PREROUTING -p {} -s {} --dport {} -j DNAT --to-destination {}".format
(ip_protocol, ipv6_src_ip, dst_port,
self.namespace_mgmt_ipv6))
fwd_traffic_from_namespace_to_host_cmds.append(self.iptables_cmd_ns_prefix[namespace] +
"ip6tables -t nat -A POSTROUTING -p {} -s {} --dport {} -j SNAT --to-source {}".format
(ip_protocol,ipv6_src_ip, dst_port,
self.namespace_docker_mgmt_ipv6[namespace]))
return fwd_traffic_from_namespace_to_host_cmds
def is_rule_ipv4(self, rule_props):
if (("SRC_IP" in rule_props and rule_props["SRC_IP"]) or
("DST_IP" in rule_props and rule_props["DST_IP"])):
return True
else:
return False
def is_rule_ipv6(self, rule_props):
if (("SRC_IPV6" in rule_props and rule_props["SRC_IPV6"]) or
("DST_IPV6" in rule_props and rule_props["DST_IPV6"])):
return True
else:
return False
def setup_dhcp_chain(self, namespace):
all_chains = self.get_chain_list(self.iptables_cmd_ns_prefix[namespace], [""])
dhcp_chain_exist = "DHCP" in all_chains
iptables_cmds = []
if dhcp_chain_exist:
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -F DHCP")
self.log_info("DHCP chain exists, flush")
else:
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -N DHCP")
self.log_info("DHCP chain does not exist, create")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A DHCP -j RETURN")
self.log_info("Issuing the following iptables commands for DHCP chain:")
for cmd in iptables_cmds:
self.log_info(" " + cmd)
self.run_commands(iptables_cmds)
def get_chain_list(self, iptable_ns_cmd_prefix, exclude_list):
command = iptable_ns_cmd_prefix + "iptables -L -v -n | grep Chain | awk '{print $2}'"
chain_list = self.run_commands([command]).splitlines()
for chain in exclude_list:
if chain in chain_list:
chain_list.remove(chain)
return chain_list
def dhcp_acl_rule(self, iptable_ns_cmd_prefix, op, intf, mark):
'''
sample: iptables --insert/delete/check DHCP -m physdev --physdev-in Ethernet4 -j DROP
sample: iptables --insert/delete/check DHCP -m mark --mark 0x67004 -j DROP
'''
if mark is None:
return iptable_ns_cmd_prefix + 'iptables --{} DHCP -m physdev --physdev-in {} -j DROP'.format(op, intf)
else:
return iptable_ns_cmd_prefix + 'iptables --{} DHCP -m mark --mark {} -j DROP'.format(op, mark)
def update_dhcp_chain(self, op, intf, mark):
for namespace in list(self.config_db_map.keys()):
check_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "check", intf, mark)
update_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], op, intf, mark)
execute = 0
ret = subprocess.call(check_cmd, shell=True) # ret==0 indicates the rule exists
if op == "insert" and ret == 1:
execute = 1
if op == "delete" and ret == 0:
execute = 1
if execute == 1:
subprocess.call(update_cmd, shell=True)
self.log_info("Update DHCP chain: {}".format(update_cmd))
def update_dhcp_acl(self, key, op, data, mark):
if "state" not in data:
self.log_warning("Unexpected update in MUX_CABLE_TABLE")
return
intf = key
state = data["state"]
if state == "active":
self.update_dhcp_chain("delete", intf, mark)
elif state == "standby":
self.update_dhcp_chain("insert", intf, mark)
elif state == "unknown":
self.update_dhcp_chain("delete", intf, mark)
elif state == "error":
self.log_warning("Cable state shows error")
else:
self.log_warning("Unexpected cable state")
def update_dhcp_acl_for_mark_change(self, key, pre_mark, cur_mark):
for namespace in list(self.config_db_map.keys()):
check_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "check", key, pre_mark)
ret = subprocess.call(check_cmd, shell=True) # ret==0 indicates the rule exists
'''update only when the rule with pre_mark exists'''
if ret == 0:
delete_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "delete", key, pre_mark)
insert_cmd = self.dhcp_acl_rule(self.iptables_cmd_ns_prefix[namespace], "insert", key, cur_mark)
subprocess.call(delete_cmd, shell=True)
self.log_info("Update DHCP chain: {}".format(delete_cmd))
subprocess.call(insert_cmd, shell=True)
self.log_info("Update DHCP chain: {}".format(insert_cmd))
def get_acl_rules_and_translate_to_iptables_commands(self, namespace):
"""
Retrieves current ACL tables and rules from Config DB, translates
control plane ACLs into a list of iptables commands that can be run
in order to install ACL rules.
Returns:
A list of strings, each string is an iptables shell command
"""
iptables_cmds = []
service_to_source_ip_map = {}
# First, add iptables commands to set default policies to accept all
# traffic. In case we are connected remotely, the connection will not
# drop when we flush the current rules
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -P INPUT ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -P FORWARD ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -P OUTPUT ACCEPT")
# Add iptables command to flush the current rules and delete all non-default chains
chain_list = self.get_chain_list(self.iptables_cmd_ns_prefix[namespace], ["DHCP"] if self.DualToR else [""])
for chain in chain_list:
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -F " + chain)
if chain not in ["INPUT", "FORWARD", "OUTPUT"]:
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -X " + chain)
# Add same set of commands for ip6tables
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -P INPUT ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -P FORWARD ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -P OUTPUT ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -F")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -X")
# Add iptables/ip6tables commands to allow all traffic from localhost
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -s 127.0.0.1 -i lo -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -s ::1 -i lo -j ACCEPT")
# Add iptables commands to allow internal docker traffic
iptables_cmds += self.generate_allow_internal_docker_ip_traffic_commands(namespace)
# Add iptables/ip6tables commands to allow all incoming packets from established
# connections or new connections which are related to established connections
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT")
# Add iptables/ip6tables commands to allow bidirectional ICMPv4 ping and traceroute
# TODO: Support processing ICMPv4 service ACL rules, and remove this blanket acceptance
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type destination-unreachable -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p icmp --icmp-type time-exceeded -j ACCEPT")
# Add iptables/ip6tables commands to allow bidirectional ICMPv6 ping and traceroute
# TODO: Support processing ICMPv6 service ACL rules, and remove this blanket acceptance
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type echo-request -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type echo-reply -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT")
# Add iptables/ip6tables commands to allow all incoming Neighbor Discovery Protocol (NDP) NS/NA/RS/RA messages
# TODO: Support processing NDP service ACL rules, and remove this blanket acceptance
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type neighbor-solicitation -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type neighbor-advertisement -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type router-solicitation -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p icmpv6 --icmpv6-type router-advertisement -j ACCEPT")
# Add iptables commands to link the DHCP chain to block DHCP packets based on ingress interfaces
if self.DualToR:
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p udp --dport 67 -j DHCP")
# Add iptables/ip6tables commands to allow all incoming IPv4 DHCP packets
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p udp --dport 67:68 -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p udp --dport 67:68 -j ACCEPT")
# Add iptables/ip6tables commands to allow all incoming IPv6 DHCP packets
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p udp --dport 546:547 -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p udp --dport 546:547 -j ACCEPT")
# Add iptables/ip6tables commands to allow all incoming BGP traffic
# TODO: Determine BGP ACLs based on configured device sessions, and remove this blanket acceptance
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -p tcp --dport 179 -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p tcp --dport 179 -j ACCEPT")
# Get current ACL tables and rules from Config DB
self._tables_db_info = self.config_db_map[namespace].get_table(self.ACL_TABLE)
self._rules_db_info = self.config_db_map[namespace].get_table(self.ACL_RULE)
num_ctrl_plane_acl_rules = 0
# Walk the ACL tables
for (table_name, table_data) in self._tables_db_info.items():
table_ip_version = None
# Ignore non-control-plane ACL tables
if table_data["type"] != self.ACL_TABLE_TYPE_CTRLPLANE:
continue
acl_services = table_data["services"]
for acl_service in acl_services:
if acl_service not in self.ACL_SERVICES:
self.log_warning("Ignoring control plane ACL '{}' with unrecognized service '{}'"
.format(table_name, acl_service))
continue
self.log_info("Translating ACL rules for control plane ACL '{}' (service: '{}')"
.format(table_name, acl_service))
# Obtain default IP protocol(s) and destination port(s) for this service
ip_protocols = self.ACL_SERVICES[acl_service]["ip_protocols"]
dst_ports = self.ACL_SERVICES[acl_service]["dst_ports"]
acl_rules = {}
for ((rule_table_name, rule_id), rule_props) in self._rules_db_info.items():
rule_props = {k.upper(): v for k,v in rule_props.items()}
if rule_table_name == table_name:
if not rule_props:
self.log_warning("rule_props for rule_id {} empty or null!".format(rule_id))
continue
try:
acl_rules[rule_props["PRIORITY"]] = rule_props
except KeyError:
self.log_error("rule_props for rule_id {} does not have key 'PRIORITY'!".format(rule_id))
continue
# If we haven't determined the IP version for this ACL table yet,
# try to do it now. We attempt to determine heuristically based on
# whether the src or dst IP of this rule is an IPv4 or IPv6 address.
if not table_ip_version:
if self.is_rule_ipv6(rule_props):
table_ip_version = 6
elif self.is_rule_ipv4(rule_props):
table_ip_version = 4
if (self.is_rule_ipv6(rule_props) and (table_ip_version == 4)):
self.log_error("CtrlPlane ACL table {} is a IPv4 based table and rule {} is a IPV6 rule! Ignoring rule."
.format(table_name, rule_id))
acl_rules.pop(rule_props["PRIORITY"])
elif (self.is_rule_ipv4(rule_props) and (table_ip_version == 6)):
self.log_error("CtrlPlane ACL table {} is a IPv6 based table and rule {} is a IPV4 rule! Ignroing rule."
.format(table_name, rule_id))
acl_rules.pop(rule_props["PRIORITY"])
# If we were unable to determine whether this ACL table contains
# IPv4 or IPv6 rules, log a message and skip processing this table.
if not table_ip_version:
self.log_warning("Unable to determine if ACL table '{}' contains IPv4 or IPv6 rules. Skipping table..."
.format(table_name))
continue
ipv4_src_ip_set = set()
ipv6_src_ip_set = set()
# For each ACL rule in this table (in descending order of priority)
for priority in sorted(iter(acl_rules.keys()), reverse=True):
rule_props = acl_rules[priority]
if "PACKET_ACTION" not in rule_props:
self.log_error("ACL rule does not contain PACKET_ACTION property")
continue
# Apply the rule to the default protocol(s) for this ACL service
for ip_protocol in ip_protocols:
for dst_port in dst_ports:
rule_cmd = "ip6tables" if table_ip_version == 6 else "iptables"
rule_cmd += " -A INPUT"
if ip_protocol != "any":
rule_cmd += " -p {}".format(ip_protocol)
if "SRC_IPV6" in rule_props and rule_props["SRC_IPV6"]:
rule_cmd += " -s {}".format(rule_props["SRC_IPV6"])
if rule_props["PACKET_ACTION"] == "ACCEPT":
ipv6_src_ip_set.add(rule_props["SRC_IPV6"])
elif "SRC_IP" in rule_props and rule_props["SRC_IP"]:
rule_cmd += " -s {}".format(rule_props["SRC_IP"])
if rule_props["PACKET_ACTION"] == "ACCEPT":
ipv4_src_ip_set.add(rule_props["SRC_IP"])
# Destination port 0 is a reserved/unused port, so we use it to apply the rule to all ports.
if dst_port != "0":
rule_cmd += " --dport {}".format(dst_port)
# If there are TCP flags present and ip protocol is TCP, append them
if ip_protocol == "tcp" and "TCP_FLAGS" in rule_props and rule_props["TCP_FLAGS"]:
tcp_flags, tcp_flags_mask = rule_props["TCP_FLAGS"].split("/")
tcp_flags = int(tcp_flags, 16)
tcp_flags_mask = int(tcp_flags_mask, 16)
if tcp_flags_mask > 0:
rule_cmd += " --tcp-flags {mask} {flags}".format(mask=self.parse_int_to_tcp_flags(tcp_flags_mask), flags=self.parse_int_to_tcp_flags(tcp_flags))
# Append the packet action as the jump target
rule_cmd += " -j {}".format(rule_props["PACKET_ACTION"])
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + rule_cmd)
num_ctrl_plane_acl_rules += 1
service_to_source_ip_map.update({ acl_service:{ "ipv4":ipv4_src_ip_set, "ipv6":ipv6_src_ip_set } })
# Add iptables commands to block ip2me traffic
iptables_cmds += self.generate_block_ip2me_traffic_iptables_commands(namespace)
# Add iptables/ip6tables commands to allow all incoming packets with TTL of 0 or 1
# This allows the device to respond to tools like tcptraceroute
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -m ttl --ttl-lt 2 -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -p tcp -m hl --hl-lt 2 -j ACCEPT")
# Finally, if the device has control plane ACLs configured,
# add iptables/ip6tables commands to drop all other incoming packets
if num_ctrl_plane_acl_rules > 0:
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -A INPUT -j DROP")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -A INPUT -j DROP")
return iptables_cmds, service_to_source_ip_map
def update_control_plane_acls(self, namespace):
"""
Convenience wrapper which retrieves current ACL tables and rules from
Config DB, translates control plane ACLs into a list of iptables
commands and runs them.
"""
iptables_cmds, service_to_source_ip_map = self.get_acl_rules_and_translate_to_iptables_commands(namespace)
self.log_info("Issuing the following iptables commands:")
for cmd in iptables_cmds:
self.log_info(" " + cmd)
self.run_commands(iptables_cmds)
self.update_control_plane_nat_acls(namespace, service_to_source_ip_map)
def update_control_plane_nat_acls(self, namespace, service_to_source_ip_map):
"""
Convenience wrapper for multi-asic platforms
which programs the NAT rules for redirecting the
traffic coming on the front panel interface map to namespace
to the host.
"""
# Add iptables commands to allow front panel traffic
iptables_cmds = self.generate_fwd_traffic_from_namespace_to_host_commands(namespace, service_to_source_ip_map)
self.log_info("Issuing the following iptables commands:")
for cmd in iptables_cmds:
self.log_info(" " + cmd)
self.run_commands(iptables_cmds)
def check_and_update_control_plane_acls(self, namespace, num_changes):
"""
This function is intended to be spawned in a separate thread.
Its purpose is to prevent unnecessary iptables updates if we receive
multiple rapid ACL table update notifications. It sleeps for UPDATE_DELAY_SECS
then checks if any more ACL table updates were received in that window. If new
updates were received, it will sleep again and repeat the process until no
updates were received during the delay window, at which point it will update
iptables using the current ACL rules.
"""
while True:
# Sleep for our delay interval
time.sleep(self.UPDATE_DELAY_SECS)
with self.lock[namespace]:
if self.num_changes[namespace] > num_changes:
# More ACL table changes occurred since this thread was spawned
# spawn a new thread with the current number of changes
new_changes = self.num_changes[namespace] - num_changes
self.log_info("ACL config not stable for namespace '{}': {} changes detected in the past {} seconds. Skipping update ..."
.format(namespace, new_changes, self.UPDATE_DELAY_SECS))
num_changes = self.num_changes[namespace]
else:
if num_changes == self.num_changes[namespace] and num_changes > 0:
self.log_info("ACL config for namespace '{}' has not changed for {} seconds. Applying updates ..."
.format(namespace, self.UPDATE_DELAY_SECS))
self.update_control_plane_acls(namespace)
else:
self.log_error("Error updating ACLs for namespace '{}'".format(namespace))
# Re-initialize
self.num_changes[namespace] = 0
self.update_thread[namespace] = None
return
def allow_bfd_protocol(self, namespace):
iptables_cmds = []
# Add iptables/ip6tables commands to allow all BFD singlehop and multihop sessions
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "iptables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT")
iptables_cmds.append(self.iptables_cmd_ns_prefix[namespace] + "ip6tables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT")
self.run_commands(iptables_cmds)
def run(self):
# Set select timeout to 1 second
SELECT_TIMEOUT_MS = 1000
self.log_info("Starting up ...")
if not os.geteuid() == 0:
self.log_error("Must be root to run this daemon")
print("Error: Must be root to run this daemon")
sys.exit(1)
# Initialize global config that loads all database*.json
if device_info.is_multi_npu():
swsscommon.SonicDBConfig.initializeGlobalConfig()
# Create the Select object
sel = swsscommon.Select()
# Set up STATE_DB connector to monitor the change in MUX_CABLE_TABLE
state_db_connector = None
subscribe_mux_cable = None
subscribe_dhcp_packet_mark = None
state_db_id = swsscommon.SonicDBConfig.getDbId("STATE_DB")
dhcp_packet_mark_tbl = {}
# set up state_db connector
state_db_connector = swsscommon.DBConnector("STATE_DB", 0)
if self.DualToR:
self.log_info("Dual ToR mode")
subscribe_mux_cable = swsscommon.SubscriberStateTable(state_db_connector, self.MUX_CABLE_TABLE)
sel.addSelectable(subscribe_mux_cable)
subscribe_dhcp_packet_mark = swsscommon.SubscriberStateTable(state_db_connector, "DHCP_PACKET_MARK")
sel.addSelectable(subscribe_dhcp_packet_mark)
# create DHCP chain
for namespace in list(self.config_db_map.keys()):
self.setup_dhcp_chain(namespace)
# This should be migrated from state_db BFD session table to feature_table in the future when feature table support gets added for BFD
subscribe_bfd_session = swsscommon.SubscriberStateTable(state_db_connector, self.BFD_SESSION_TABLE)
sel.addSelectable(subscribe_bfd_session)
# Map of namespace <--> subscriber table objects
config_db_subscriber_table_map = {}
# Loop through all asic namespaces (if present) and host namespace (DEFAULT_NAMESPACE)
for namespace in list(self.config_db_map.keys()):
# Unconditionally update control plane ACLs once at start on given namespace
self.update_control_plane_acls(namespace)
# Connect to Config DB of given namespace
acl_db_connector = swsscommon.DBConnector("CONFIG_DB", 0, False, namespace)
# Subscribe to notifications when ACL tables changes
subscribe_acl_table = swsscommon.SubscriberStateTable(acl_db_connector, swsscommon.CFG_ACL_TABLE_TABLE_NAME)
# Subscribe to notifications when ACL rule tables changes
subscribe_acl_rule_table = swsscommon.SubscriberStateTable(acl_db_connector, swsscommon.CFG_ACL_RULE_TABLE_NAME)
# Add both tables to the selectable object
sel.addSelectable(subscribe_acl_table)
sel.addSelectable(subscribe_acl_rule_table)
# Update the map
config_db_subscriber_table_map[namespace] = []
config_db_subscriber_table_map[namespace].append(subscribe_acl_table)
config_db_subscriber_table_map[namespace].append(subscribe_acl_rule_table)
# Get the ACL rule table separator
acl_rule_table_seprator = subscribe_acl_rule_table.getTableNameSeparator()
# Loop on select to see if any event happen on state db or config db of any namespace
while True:
(state, selectableObj) = sel.select(SELECT_TIMEOUT_MS)
# Continue if select timed out or no selectable object was returned
if state != swsscommon.Select.OBJECT:
continue
# Get the redisselect object from selectable object
redisSelectObj = swsscommon.CastSelectableToRedisSelectObj(selectableObj)
# Get the corresponding namespace and db_id from redisselect
namespace = redisSelectObj.getDbConnector().getNamespace()
db_id = redisSelectObj.getDbConnector().getDbId()
if db_id == state_db_id:
while True:
key, op, fvs = subscribe_bfd_session.pop()
if not key:
break
if op == 'SET' and not self.bfdAllowed:
self.allow_bfd_protocol(namespace)
self.bfdAllowed = True
sel.removeSelectable(subscribe_bfd_session)
if self.DualToR:
'''dhcp packet mark update'''
while True:
key, op, fvs = subscribe_dhcp_packet_mark.pop()
if not key:
break
self.log_info("dhcp packet mark update : '%s'" % str((key, op, fvs)))
'''initial value is None'''
pre_mark = None if key not in dhcp_packet_mark_tbl else dhcp_packet_mark_tbl[key]
cur_mark = None if op == 'DEL' else dict(fvs)['mark']
dhcp_packet_mark_tbl[key] = cur_mark
self.update_dhcp_acl_for_mark_change(key, pre_mark, cur_mark)
'''mux cable update'''
while True:
key, op, fvs = subscribe_mux_cable.pop()
if not key:
break
self.log_info("mux cable update : '%s'" % str((key, op, fvs)))
mark = None if key not in dhcp_packet_mark_tbl else dhcp_packet_mark_tbl[key]
self.update_dhcp_acl(key, op, dict(fvs), mark)
continue
ctrl_plane_acl_notification = set()
# Pop data of both Subscriber Table object of namespace that got config db acl table event
for table in config_db_subscriber_table_map[namespace]:
while True:
(key, op, fvp) = table.pop()
# Pop of table that does not have data so break
if key == '':
break
# ACL Table notification. We will take Control Plane ACTION for any ACL Table Event
# This can be optimized further, but we should not have many ACL table set/del events in a normal
# scenario
if acl_rule_table_seprator not in key:
ctrl_plane_acl_notification.add(namespace)
# Check ACL Rule notification and make sure Rule point to ACL Table which is Controlplane
else:
acl_table = key.split(acl_rule_table_seprator)[0]
if self.config_db_map[namespace].get_table(self.ACL_TABLE)[acl_table]["type"] == self.ACL_TABLE_TYPE_CTRLPLANE:
ctrl_plane_acl_notification.add(namespace)
# Update the Control Plane ACL of the namespace that got config db acl table event
for namespace in ctrl_plane_acl_notification:
with self.lock[namespace]:
if self.num_changes[namespace] == 0:
self.log_info("ACL change detected for namespace '{}'".format(namespace))
# Increment the number of change events we've received for this namespace
self.num_changes[namespace] += 1
# If an update thread is not already spawned for the namespace which we received
# the ACL table update event, spawn one now
if not self.update_thread[namespace]:
self.log_info("Spawning ACL update thread for namepsace '{}' ...".format(namespace))
self.update_thread[namespace] = threading.Thread(target=self.check_and_update_control_plane_acls,
args=(namespace, self.num_changes[namespace]))
self.update_thread[namespace].start()
# ============================= Functions =============================
def main():
# Instantiate a ControlPlaneAclManager object
caclmgr = ControlPlaneAclManager(SYSLOG_IDENTIFIER)
# Log all messages from INFO level and higher
caclmgr.set_min_log_priority_info()
caclmgr.run()
if __name__ == "__main__":
main()
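
A standalone sketch of the TCP-flags translation performed by parse_int_to_tcp_flags() above (illustrative only): a rule whose TCP_FLAGS field is "0x12/0x12" (SYN and ACK set, SYN and ACK masked) renders as "--tcp-flags SYN,ACK SYN,ACK", with the mask listed first as in rule_cmd above.

def parse_int_to_tcp_flags(hex_value):
    # Same bit-to-name mapping as the daemon; ECE/CWR are intentionally skipped.
    names = [(0x01, "FIN"), (0x02, "SYN"), (0x04, "RST"),
             (0x08, "PSH"), (0x10, "ACK"), (0x20, "URG")]
    return ",".join(name for bit, name in names if hex_value & bit)

flags, mask = (int(x, 16) for x in "0x12/0x12".split("/"))
print("--tcp-flags {} {}".format(parse_int_to_tcp_flags(mask),
                                 parse_int_to_tcp_flags(flags)))
# --tcp-flags SYN,ACK SYN,ACK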


@@ -1,228 +0,0 @@
#!/usr/bin/env python3
#
# determine-reboot-cause
#
# Program designed to run once, soon after system boot, which will
# determine the cause of the previous reboot and store it to disk.
#
try:
import datetime
import json
import os
import pwd
import re
import sys
from sonic_py_common import device_info, logger
except ImportError as err:
raise ImportError("%s - required module not found" % str(err))
VERSION = "1.0"
SYSLOG_IDENTIFIER = "determine-reboot-cause"
REBOOT_CAUSE_DIR = "/host/reboot-cause/"
REBOOT_CAUSE_HISTORY_DIR = "/host/reboot-cause/history/"
REBOOT_CAUSE_FILE = os.path.join(REBOOT_CAUSE_DIR, "reboot-cause.txt")
PREVIOUS_REBOOT_CAUSE_FILE = os.path.join(REBOOT_CAUSE_DIR, "previous-reboot-cause.json")
FIRST_BOOT_PLATFORM_FILE = "/tmp/notify_firstboot_to_platform"
REBOOT_TYPE_KEXEC_FILE = "/proc/cmdline"
# The following SONIC_BOOT_TYPEs come from the warm/fast reboot script which is in sonic-utilities
# Because the system can be rebooted from some old versions, we have to take all possible BOOT options into consideration.
# On 201803, 201807 we have
# BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') fast-reboot"
# On 201811 and later we have
# BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" where BOOT_TYPE_ARG can be warm, fastfast or fast
# To extract the common part of them, we should have the following PATTERN
REBOOT_TYPE_KEXEC_PATTERN_WARM = ".*SONIC_BOOT_TYPE=(warm|fastfast).*"
REBOOT_TYPE_KEXEC_PATTERN_FAST = ".*SONIC_BOOT_TYPE=(fast|fast-reboot).*"
REBOOT_CAUSE_UNKNOWN = "Unknown"
REBOOT_CAUSE_NON_HARDWARE = "Non-Hardware"
REBOOT_CAUSE_HARDWARE_OTHER = "Hardware - Other"
# Global logger class instance
sonic_logger = logger.Logger(SYSLOG_IDENTIFIER)
# ============================= Functions =============================
def parse_warmfast_reboot_from_proc_cmdline():
if os.path.isfile(REBOOT_TYPE_KEXEC_FILE):
with open(REBOOT_TYPE_KEXEC_FILE) as cause_file:
cause_file_kexec = cause_file.readline()
m = re.search(REBOOT_TYPE_KEXEC_PATTERN_WARM, cause_file_kexec)
if m and m.group(1):
return 'warm-reboot'
m = re.search(REBOOT_TYPE_KEXEC_PATTERN_FAST, cause_file_kexec)
if m and m.group(1):
return 'fast-reboot'
return None
def find_software_reboot_cause_from_reboot_cause_file():
software_reboot_cause = REBOOT_CAUSE_UNKNOWN
if os.path.isfile(REBOOT_CAUSE_FILE):
with open(REBOOT_CAUSE_FILE) as cause_file:
software_reboot_cause = cause_file.readline().rstrip('\n')
sonic_logger.log_info("{} indicates the reboot cause: {}".format(REBOOT_CAUSE_FILE, software_reboot_cause))
else:
sonic_logger.log_info("Reboot cause file {} not found".format(REBOOT_CAUSE_FILE))
return software_reboot_cause
def find_first_boot_version():
build_version = "unknown"
version_info = device_info.get_sonic_version_info()
if version_info:
build_version = version_info['build_version']
return " (First boot of SONiC version {})".format(build_version)
def find_software_reboot_cause():
software_reboot_cause = find_software_reboot_cause_from_reboot_cause_file()
if software_reboot_cause == REBOOT_CAUSE_UNKNOWN:
if os.path.isfile(FIRST_BOOT_PLATFORM_FILE):
software_reboot_cause += find_first_boot_version()
os.remove(FIRST_BOOT_PLATFORM_FILE)
return software_reboot_cause
def find_proc_cmdline_reboot_cause():
proc_cmdline_reboot_cause = parse_warmfast_reboot_from_proc_cmdline()
if proc_cmdline_reboot_cause:
sonic_logger.log_info("/proc/cmdline indicates reboot type: {}".format(proc_cmdline_reboot_cause))
else:
sonic_logger.log_info("No reboot cause found from /proc/cmdline")
return proc_cmdline_reboot_cause
def get_reboot_cause_from_platform():
# Find hardware reboot cause using sonic_platform library
try:
import sonic_platform
platform = sonic_platform.platform.Platform()
chassis = platform.get_chassis()
hardware_reboot_cause_major, hardware_reboot_cause_minor = chassis.get_reboot_cause()
sonic_logger.log_info("Platform api returns reboot cause {}, {}".format(hardware_reboot_cause_major, hardware_reboot_cause_minor))
except ImportError:
sonic_logger.log_warning("sonic_platform package not installed. Unable to detect hardware reboot causes.")
hardware_reboot_cause_major, hardware_reboot_cause_minor = REBOOT_CAUSE_NON_HARDWARE, "N/A"
return hardware_reboot_cause_major, hardware_reboot_cause_minor
def find_hardware_reboot_cause():
hardware_reboot_cause_major, hardware_reboot_cause_minor = get_reboot_cause_from_platform()
if hardware_reboot_cause_major:
sonic_logger.log_info("Platform api indicates reboot cause {}".format(hardware_reboot_cause_major))
else:
sonic_logger.log_info("No reboot cause found from platform api")
hardware_reboot_cause = "{} ({})".format(hardware_reboot_cause_major, hardware_reboot_cause_minor)
return hardware_reboot_cause
def get_reboot_cause_dict(previous_reboot_cause, comment, gen_time):
"""Store the key infomation of device reboot into a dictionary by parsing the string in
previous_reboot_cause.
If a user issued a command to reboot the device, then the user, command and time will be
stored into a dictionary.
If device was rebooted due to the kernel panic, then the string `Kernel Panic`
and time will be stored into a dictionary.
"""
reboot_cause_dict = {}
reboot_cause_dict['gen_time'] = gen_time
reboot_cause_dict['cause'] = previous_reboot_cause
reboot_cause_dict['user'] = "N/A"
reboot_cause_dict['time'] = "N/A"
reboot_cause_dict['comment'] = comment if comment is not None else "N/A"
if re.search(r'User issued', previous_reboot_cause):
# Match with "User issued '{}' command [User: {}, Time: {}]"
match = re.search(r'User issued \'(.*)\' command \[User: (.*), Time: (.*)\]', previous_reboot_cause)
if match is not None:
reboot_cause_dict['cause'] = match.group(1)
reboot_cause_dict['user'] = match.group(2)
reboot_cause_dict['time'] = match.group(3)
elif re.search(r'Kernel Panic', previous_reboot_cause):
match = re.search(r'Kernel Panic \[Time: (.*)\]', previous_reboot_cause)
if match is not None:
reboot_cause_dict['cause'] = "Kernel Panic"
reboot_cause_dict['time'] = match.group(1)
return reboot_cause_dict
def main():
# Configure logger to log all messages INFO level and higher
sonic_logger.set_min_log_priority_info()
sonic_logger.log_info("Starting up...")
if not os.geteuid() == 0:
sonic_logger.log_error("User {} does not have permission to execute".format(pwd.getpwuid(os.getuid()).pw_name))
sys.exit("This utility must be run as root")
# Create REBOOT_CAUSE_DIR if it doesn't exist
if not os.path.exists(REBOOT_CAUSE_DIR):
os.makedirs(REBOOT_CAUSE_DIR)
# Remove stale PREVIOUS_REBOOT_CAUSE_FILE if it exists
if os.path.exists(PREVIOUS_REBOOT_CAUSE_FILE):
os.remove(PREVIOUS_REBOOT_CAUSE_FILE)
# This variable is kept for future-use purpose. When proc_cmd_line/vendor/software provides
# any additional_reboot_info it will be stored as a "comment" in REBOOT_CAUSE_HISTORY_FILE
additional_reboot_info = "N/A"
# Check if the previous reboot was warm/fast reboot by testing whether there is "fast|fastfast|warm" in /proc/cmdline
proc_cmdline_reboot_cause = find_proc_cmdline_reboot_cause()
# If /proc/cmdline does not indicate reboot cause, check if the previous reboot was caused by hardware
if proc_cmdline_reboot_cause is None:
previous_reboot_cause = find_hardware_reboot_cause()
if previous_reboot_cause.startswith(REBOOT_CAUSE_NON_HARDWARE):
# If the reboot cause is non-hardware, get the reboot cause from REBOOT_CAUSE_FILE
previous_reboot_cause = find_software_reboot_cause()
else:
# Get the reboot cause from REBOOT_CAUSE_FILE
previous_reboot_cause = find_software_reboot_cause()
# Current time
reboot_cause_gen_time = str(datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
# Save the previous cause info into its history file as json format
reboot_cause_dict = get_reboot_cause_dict(previous_reboot_cause, additional_reboot_info, reboot_cause_gen_time)
# Create reboot-cause-#time#.json under history directory
REBOOT_CAUSE_HISTORY_FILE = os.path.join(REBOOT_CAUSE_HISTORY_DIR, "reboot-cause-{}.json".format(reboot_cause_gen_time))
# Create REBOOT_CAUSE_HISTORY_DIR if it doesn't exist
if not os.path.exists(REBOOT_CAUSE_HISTORY_DIR):
os.makedirs(REBOOT_CAUSE_HISTORY_DIR)
# Write the previous reboot cause to REBOOT_CAUSE_HISTORY_FILE as a JSON format
with open(REBOOT_CAUSE_HISTORY_FILE, "w") as reboot_cause_history_file:
json.dump(reboot_cause_dict, reboot_cause_history_file)
# Create a symbolic link to previous-reboot-cause.json file
os.symlink(REBOOT_CAUSE_HISTORY_FILE, PREVIOUS_REBOOT_CAUSE_FILE)
# Remove the old REBOOT_CAUSE_FILE
if os.path.exists(REBOOT_CAUSE_FILE):
os.remove(REBOOT_CAUSE_FILE)
# Write a new default reboot cause file for the next reboot
with open(REBOOT_CAUSE_FILE, "w") as cause_file:
cause_file.write(REBOOT_CAUSE_UNKNOWN)
if __name__ == "__main__":
main()
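
An illustrative walk-through of the parsing done by get_reboot_cause_dict() above for a user-issued reboot (the user name and timestamps are made up):

import re

previous_reboot_cause = "User issued 'reboot' command [User: admin, Time: Thu Jul 14 08:00:00 UTC 2022]"
match = re.search(r"User issued \'(.*)\' command \[User: (.*), Time: (.*)\]",
                  previous_reboot_cause)
reboot_cause_dict = {
    'gen_time': '2022_07_14_08_01_02',  # strftime('%Y_%m_%d_%H_%M_%S') at parse time
    'cause': match.group(1),            # 'reboot'
    'user': match.group(2),             # 'admin'
    'time': match.group(3),             # timestamp embedded in the cause string
    'comment': 'N/A',
}
print(reboot_cause_dict)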

File diff suppressed because it is too large.


@@ -1,200 +0,0 @@
#!/usr/bin/env python3
'''
procdockerstatsd
Daemon which periodically gathers process and docker statistics and pushes the data to STATE_DB
'''
import os
import re
import subprocess
import sys
import time
from datetime import datetime
from sonic_py_common import daemon_base
from swsscommon import swsscommon
VERSION = '1.0'
SYSLOG_IDENTIFIER = "procdockerstatsd"
REDIS_HOSTIP = "127.0.0.1"
class ProcDockerStats(daemon_base.DaemonBase):
def __init__(self, log_identifier):
super(ProcDockerStats, self).__init__(log_identifier)
self.state_db = swsscommon.SonicV2Connector(host=REDIS_HOSTIP)
self.state_db.connect("STATE_DB")
def run_command(self, cmd):
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
self.log_error("Error running command '{}'".format(cmd))
return None
else:
return stdout
def format_docker_cmd_output(self, cmdout):
lines = cmdout.splitlines()
keys = re.split(" +", lines[0])
docker_data = dict()
docker_data_list = []
for line in lines[1:]:
values = re.split(" +", line)
docker_data = {key: value for key, value in zip(keys, values)}
docker_data_list.append(docker_data)
formatted_dict = self.create_docker_dict(docker_data_list)
return formatted_dict
def format_process_cmd_output(self, cmdout):
lines = cmdout.splitlines()
keys = re.split(" +", lines[0])
key_list = [key for key in keys if key]
process_data = dict()
process_data_list = []
for line in lines[1:]:
values = re.split(" +", line)
# To remove extra space before UID
val_list = [val for val in values if val]
# Merge extra columns created due to spaces in the cmd output
val_list[8:] = [' '.join(val_list[8:])]
process_data = {key: value for key, value in zip(key_list, val_list)}
process_data_list.append(process_data)
return process_data_list
def convert_to_bytes(self, value):
UNITS_B = 'B'
UNITS_KB = 'KB'
UNITS_MB = 'MB'
UNITS_MiB = 'MiB'
UNITS_GiB = 'GiB'
res = re.match(r'(\d+\.?\d*)([a-zA-Z]+)', value)
value = float(res.groups()[0])
units = res.groups()[1]
if units.lower() == UNITS_KB.lower():
value *= 1000
elif units.lower() == UNITS_MB.lower():
value *= (1000 * 1000)
elif units.lower() == UNITS_MiB.lower():
value *= (1024 * 1024)
elif units.lower() == UNITS_GiB.lower():
value *= (1024 * 1024 * 1024)
return int(round(value))
def create_docker_dict(self, dict_list):
dockerdict = {}
for row in dict_list[0:]:
cid = row.get('CONTAINER ID')
if cid:
key = 'DOCKER_STATS|{}'.format(cid)
dockerdict[key] = {}
dockerdict[key]['NAME'] = row.get('NAME')
cpu = row.get('CPU %').split("%")
dockerdict[key]['CPU%'] = str(cpu[0])
memuse = row.get('MEM USAGE / LIMIT').split(" / ")
# converting MiB and GiB to bytes
dockerdict[key]['MEM_BYTES'] = str(self.convert_to_bytes(memuse[0]))
dockerdict[key]['MEM_LIMIT_BYTES'] = str(self.convert_to_bytes(memuse[1]))
mem = row.get('MEM %').split("%")
dockerdict[key]['MEM%'] = str(mem[0])
netio = row.get('NET I/O').split(" / ")
dockerdict[key]['NET_IN_BYTES'] = str(self.convert_to_bytes(netio[0]))
dockerdict[key]['NET_OUT_BYTES'] = str(self.convert_to_bytes(netio[1]))
blockio = row.get('BLOCK I/O').split(" / ")
dockerdict[key]['BLOCK_IN_BYTES'] = str(self.convert_to_bytes(blockio[0]))
dockerdict[key]['BLOCK_OUT_BYTES'] = str(self.convert_to_bytes(blockio[1]))
dockerdict[key]['PIDS'] = row.get('PIDS')
return dockerdict
def update_dockerstats_command(self):
cmd = "docker stats --no-stream -a"
data = self.run_command(cmd)
if not data:
self.log_error("'{}' returned null output".format(cmd))
return False
dockerdata = self.format_docker_cmd_output(data)
if not dockerdata:
self.log_error("formatting for docker output failed")
return False
# wipe out all data from state_db before updating
self.state_db.delete_all_by_pattern('STATE_DB', 'DOCKER_STATS|*')
for k1,v1 in dockerdata.items():
for k2,v2 in v1.items():
self.update_state_db(k1, k2, v2)
return True
def update_processstats_command(self):
data = self.run_command("ps -eo uid,pid,ppid,%mem,%cpu,stime,tty,time,cmd --sort -%cpu | head -1024")
processdata = self.format_process_cmd_output(data)
value = ""
# wipe out all data before updating with new values
self.state_db.delete_all_by_pattern('STATE_DB', 'PROCESS_STATS|*')
for row in processdata[0:]:
cid = row.get('PID')
if cid:
value = 'PROCESS_STATS|{}'.format(cid)
uid = row.get('UID')
self.update_state_db(value, 'UID', uid)
ppid = row.get('PPID')
self.update_state_db(value, 'PPID', ppid)
cpu = row.get('%CPU')
self.update_state_db(value, '%CPU', str(cpu))
mem = row.get('%MEM')
self.update_state_db(value, '%MEM', str(mem))
stime = row.get('STIME')
self.update_state_db(value, 'STIME', stime)
tty = row.get('TT')
self.update_state_db(value, 'TT', tty)
time = row.get('TIME')
self.update_state_db(value, 'TIME', time)
cmd = row.get('CMD')
self.update_state_db(value, 'CMD', cmd)
def update_state_db(self, key1, key2, value2):
self.state_db.set('STATE_DB', key1, key2, value2)
def run(self):
self.log_info("Starting up ...")
if not os.getuid() == 0:
self.log_error("Must be root to run this daemon")
print("Must be root to run this daemon")
sys.exit(1)
while True:
self.update_dockerstats_command()
datetimeobj = datetime.now()
# Adding key to store latest update time.
self.update_state_db('DOCKER_STATS|LastUpdateTime', 'lastupdate', str(datetimeobj))
self.update_processstats_command()
self.update_state_db('PROCESS_STATS|LastUpdateTime', 'lastupdate', str(datetimeobj))
# Data needs to be updated every 2 minutes, hence adding a delay of 120 seconds
time.sleep(120)
self.log_info("Exiting ...")
def main():
# Instantiate a ProcDockerStats object
pd = ProcDockerStats(SYSLOG_IDENTIFIER)
# Log all messages from INFO level and higher
pd.set_min_log_priority_info()
pd.run()
if __name__ == '__main__':
main()
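As a quick reference, a hedged, standalone sketch of the unit conversion performed by convert_to_bytes() above, plus the DOCKER_STATS key layout the daemon pushes to STATE_DB. The container id and values are hypothetical.
import re

def convert_to_bytes(value):
    """Mirror of the conversion above: decimal factors for KB/MB, binary for MiB/GiB."""
    num, units = re.match(r'(\d+\.?\d*)([a-zA-Z]+)', value).groups()
    factor = {'b': 1, 'kb': 1000, 'mb': 1000 * 1000,
              'mib': 1024 * 1024, 'gib': 1024 * 1024 * 1024}.get(units.lower(), 1)
    return int(round(float(num) * factor))

print(convert_to_bytes("150MiB"))   # 157286400
print(convert_to_bytes("1.2MB"))    # 1200000

# Resulting STATE_DB layout (illustrative container id):
# DOCKER_STATS|<container id> -> NAME, CPU%, MEM_BYTES, MEM_LIMIT_BYTES, MEM%,
#                                NET_IN_BYTES, NET_OUT_BYTES, BLOCK_IN_BYTES, BLOCK_OUT_BYTES, PIDS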

View File

@@ -1,100 +0,0 @@
#!/usr/bin/env python3
#
# process-reboot-cause
#
# Program designed to read the previous reboot-cause file and log the previous reboot cause,
# then read the saved reboot-cause history files and save the reboot causes in STATE_DB.
#
try:
import json
import os
import pwd
import sys
from swsscommon import swsscommon
from sonic_py_common import logger
except ImportError as err:
raise ImportError("%s - required module not found" % str(err))
VERSION = "1.0"
SYSLOG_IDENTIFIER = "process-reboot-cause"
REBOOT_CAUSE_DIR = "/host/reboot-cause/"
REBOOT_CAUSE_HISTORY_DIR = "/host/reboot-cause/history/"
PREVIOUS_REBOOT_CAUSE_FILE = os.path.join(REBOOT_CAUSE_DIR, "previous-reboot-cause.json")
USER_ISSUED_REBOOT_CAUSE_REGEX = "User issued \'{}\' command [User: {}, Time: {}]"
REBOOT_CAUSE_UNKNOWN = "Unknown"
REBOOT_CAUSE_TABLE_NAME = "REBOOT_CAUSE"
REDIS_HOSTIP = "127.0.0.1"
state_db = None
# Global logger class instance
sonic_logger = logger.Logger(SYSLOG_IDENTIFIER)
# ============================= Functions =============================
def read_reboot_cause_files_and_save_state_db():
# Connect State DB
state_db = swsscommon.SonicV2Connector(host=REDIS_HOSTIP)
state_db.connect(state_db.STATE_DB)
# Sort the previous reboot cause files by creation time
REBOOT_FILE_LIST = [os.path.join(REBOOT_CAUSE_HISTORY_DIR, i) for i in os.listdir(REBOOT_CAUSE_HISTORY_DIR)]
TIME_SORTED_FULL_REBOOT_FILE_LIST = sorted(REBOOT_FILE_LIST, key=os.path.getmtime, reverse=True)
data = []
# Read each sorted previous reboot cause file and update the state db with previous reboot cause information
for i in range(min(10, len(TIME_SORTED_FULL_REBOOT_FILE_LIST))):
x = TIME_SORTED_FULL_REBOOT_FILE_LIST[i]
if os.path.isfile(x):
with open(x, "r") as cause_file:
data = json.load(cause_file)
_hash = '{}|{}'.format(REBOOT_CAUSE_TABLE_NAME, data['gen_time'])
state_db.set(state_db.STATE_DB, _hash, 'cause', data['cause'])
state_db.set(state_db.STATE_DB, _hash, 'time', data['time'])
state_db.set(state_db.STATE_DB, _hash, 'user', data['user'])
state_db.set(state_db.STATE_DB, _hash, 'comment', data['comment'])
if len(TIME_SORTED_FULL_REBOOT_FILE_LIST) > 10:
for i in range(len(TIME_SORTED_FULL_REBOOT_FILE_LIST)):
if i >= 10:
x = TIME_SORTED_FULL_REBOOT_FILE_LIST[i]
os.remove(x)
def main():
# Configure logger to log all messages INFO level and higher
sonic_logger.set_min_log_priority_info()
sonic_logger.log_info("Starting up...")
if not os.geteuid() == 0:
sonic_logger.log_error("User {} does not have permission to execute".format(pwd.getpwuid(os.getuid()).pw_name))
sys.exit("This utility must be run as root")
# Set a default previous reboot cause
previous_reboot_cause = REBOOT_CAUSE_UNKNOWN
# Read the most recent reboot cause file and log data to syslog
if os.path.exists(PREVIOUS_REBOOT_CAUSE_FILE):
with open(PREVIOUS_REBOOT_CAUSE_FILE, "r") as last_cause_file:
data = json.load(last_cause_file)
if data['user']:
previous_reboot_cause = USER_ISSUED_REBOOT_CAUSE_REGEX.format(data['cause'], data['user'], data['time'])
else:
previous_reboot_cause = "{}".format(data['cause'])
# Log the last reboot cause to the syslog
sonic_logger.log_info("Previous reboot cause: {}".format(previous_reboot_cause))
if os.path.exists(REBOOT_CAUSE_HISTORY_DIR):
# Read the previous reboot causes from the saved reboot-cause files and save up to 10 entries to STATE_DB
read_reboot_cause_files_and_save_state_db()
if __name__ == "__main__":
main()
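For reference, a small illustrative sketch of the REBOOT_CAUSE entries that read_reboot_cause_files_and_save_state_db() above writes to STATE_DB, one hash per saved history file keyed by gen_time. The values below are hypothetical.
# Hypothetical STATE_DB content produced by the function above for one history file.
history_entry = {
    "REBOOT_CAUSE|2020_10_22_03_14_07": {
        "cause": "reboot",
        "time": "Thu Oct 22 03:11:08 UTC 2020",
        "user": "admin",
        "comment": "",
    }
}
for key, fields in history_entry.items():
    for field, value in fields.items():
        # corresponds to state_db.set(state_db.STATE_DB, key, field, value) above
        print(key, field, value)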

View File

@@ -1,88 +0,0 @@
#!/usr/bin/env python3
"""Host Service to handle docker-to-host communication"""
import os
import os.path
import glob
import importlib
import sys
import dbus
import dbus.service
import dbus.mainloop.glib
from gi.repository import GObject
def find_module_path():
"""Find path for host_moduels"""
try:
from host_modules import host_service
return os.path.dirname(host_service.__file__)
except ImportError as e:
return None
def register_modules(mod_path):
"""Register all host modules"""
sys.path.append(mod_path)
for mod_file in glob.glob(os.path.join(mod_path, '*.py')):
if os.path.isfile(mod_file) and not mod_file.endswith('__init__.py'):
mod_name = os.path.basename(mod_file)[:-3]
module = importlib.import_module(mod_name)
register_cb = getattr(module, 'register', None)
if not register_cb:
raise Exception('Missing register function for ' + mod_name)
register_dbus(register_cb)
def register_dbus(register_cb):
"""Register DBus handlers for individual modules"""
handler_class, mod_name = register_cb()
handlers[mod_name] = handler_class(mod_name)
# Create a main loop reactor
GObject.threads_init()
dbus.mainloop.glib.threads_init()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
loop = GObject.MainLoop()
handlers = {}
class SignalManager(object):
''' This is used to manage signals received (e.g. SIGTERM).
When stopping a process (systemctl stop [service]), systemd sends
a SIGTERM signal.
'''
shutdown = False
def __init__(self):
''' Install signal handlers.
SIGTERM is invoked when systemd wants to stop the daemon.
For example, "systemctl stop mydaemon.service"
or, "systemctl restart mydaemon.service"
'''
import signal
signal.signal(signal.SIGTERM, self.sigterm_hdlr)
def sigterm_hdlr(self, _signum, _frame):
self.shutdown = True
loop.quit()
sigmgr = SignalManager()
mod_path = find_module_path()
if mod_path is not None:
register_modules(mod_path)
# Only run if we actually have some handlers
if handlers:
import systemd.daemon
systemd.daemon.notify("READY=1")
while not sigmgr.shutdown:
loop.run()
if sigmgr.shutdown:
break
systemd.daemon.notify("STOPPING=1")
else:
print("No handlers to register, quitting...")

View File

@@ -1,2 +0,0 @@
[aliases]
test=pytest

View File

@@ -1,57 +0,0 @@
from setuptools import setup
setup(
name = 'sonic-host-services',
version = '1.0',
description = 'Python services which run in the SONiC host OS',
license = 'Apache 2.0',
author = 'SONiC Team',
author_email = 'linuxnetdev@microsoft.com',
url = 'https://github.com/Azure/sonic-buildimage',
maintainer = 'Joe LeVeque',
maintainer_email = 'jolevequ@microsoft.com',
packages = [
'host_modules'
],
scripts = [
'scripts/caclmgrd',
'scripts/hostcfgd',
'scripts/aaastatsd',
'scripts/procdockerstatsd',
'scripts/determine-reboot-cause',
'scripts/process-reboot-cause',
'scripts/sonic-host-server'
],
install_requires = [
'dbus-python',
'systemd-python',
'Jinja2>=2.10',
'PyGObject',
'sonic-py-common'
],
setup_requires = [
'pytest-runner',
'wheel'
],
tests_require = [
'parameterized',
'pytest',
'pyfakefs',
'sonic-py-common',
'deepdiff'
],
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.7',
'Topic :: System',
],
keywords = 'sonic SONiC host services',
test_suite = 'setup.get_test_suite'
)

View File

@@ -1,50 +0,0 @@
import os
import sys
import swsscommon
from parameterized import parameterized
from sonic_py_common.general import load_module_from_source
from unittest import TestCase, mock
from pyfakefs.fake_filesystem_unittest import patchfs
from .test_bfd_vectors import CACLMGRD_BFD_TEST_VECTOR
from tests.common.mock_configdb import MockConfigDb
from unittest.mock import MagicMock, patch
DBCONFIG_PATH = '/var/run/redis/sonic-db/database_config.json'
class TestCaclmgrdBfd(TestCase):
"""
Test caclmgrd bfd
"""
def setUp(self):
swsscommon.swsscommon.ConfigDBConnector = MockConfigDb
test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, modules_path)
caclmgrd_path = os.path.join(scripts_path, 'caclmgrd')
self.caclmgrd = load_module_from_source('caclmgrd', caclmgrd_path)
@parameterized.expand(CACLMGRD_BFD_TEST_VECTOR)
@patchfs
def test_caclmgrd_bfd(self, test_name, test_data, fs):
if not os.path.exists(DBCONFIG_PATH):
fs.create_file(DBCONFIG_PATH) # fake database_config.json
MockConfigDb.set_config_db(test_data["config_db"])
with mock.patch("caclmgrd.subprocess") as mocked_subprocess:
popen_mock = mock.Mock()
popen_attrs = test_data["popen_attributes"]
popen_mock.configure_mock(**popen_attrs)
mocked_subprocess.Popen.return_value = popen_mock
mocked_subprocess.PIPE = -1
call_rc = test_data["call_rc"]
mocked_subprocess.call.return_value = call_rc
caclmgrd_daemon = self.caclmgrd.ControlPlaneAclManager("caclmgrd")
caclmgrd_daemon.allow_bfd_protocol('')
mocked_subprocess.Popen.assert_has_calls(test_data["expected_subprocess_calls"], any_order=True)

View File

@@ -1,53 +0,0 @@
import os
import sys
import swsscommon
from parameterized import parameterized
from sonic_py_common.general import load_module_from_source
from unittest import TestCase, mock
from pyfakefs.fake_filesystem_unittest import patchfs
from .test_dhcp_vectors import CACLMGRD_DHCP_TEST_VECTOR
from tests.common.mock_configdb import MockConfigDb
DBCONFIG_PATH = '/var/run/redis/sonic-db/database_config.json'
class TestCaclmgrdDhcp(TestCase):
"""
Test caclmgrd dhcp
"""
def setUp(self):
swsscommon.ConfigDBConnector = MockConfigDb
test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, modules_path)
caclmgrd_path = os.path.join(scripts_path, 'caclmgrd')
self.caclmgrd = load_module_from_source('caclmgrd', caclmgrd_path)
@parameterized.expand(CACLMGRD_DHCP_TEST_VECTOR)
@patchfs
def test_caclmgrd_dhcp(self, test_name, test_data, fs):
if not os.path.exists(DBCONFIG_PATH):
fs.create_file(DBCONFIG_PATH) # fake database_config.json
MockConfigDb.set_config_db(test_data["config_db"])
with mock.patch("caclmgrd.subprocess") as mocked_subprocess:
popen_mock = mock.Mock()
popen_attrs = test_data["popen_attributes"]
popen_mock.configure_mock(**popen_attrs)
mocked_subprocess.Popen.return_value = popen_mock
call_rc = test_data["call_rc"]
mocked_subprocess.call.return_value = call_rc
mark = test_data["mark"]
caclmgrd_daemon = self.caclmgrd.ControlPlaneAclManager("caclmgrd")
mux_update = test_data["mux_update"]
for key,data in mux_update:
caclmgrd_daemon.update_dhcp_acl(key, '', data, mark)
mocked_subprocess.call.assert_has_calls(test_data["expected_subprocess_calls"], any_order=False)

View File

@@ -1,50 +0,0 @@
import os
import sys
import swsscommon
from parameterized import parameterized
from sonic_py_common.general import load_module_from_source
from unittest import TestCase, mock
from pyfakefs.fake_filesystem_unittest import patchfs
from .test_bfd_vectors import CACLMGRD_BFD_TEST_VECTOR
from tests.common.mock_configdb import MockConfigDb
from unittest.mock import MagicMock, patch
DBCONFIG_PATH = '/var/run/redis/sonic-db/database_config.json'
class TestFeature(TestCase):
"""
Test caclmgrd feature present
"""
def setUp(self):
swsscommon.swsscommon.ConfigDBConnector = MockConfigDb
test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, modules_path)
caclmgrd_path = os.path.join(scripts_path, 'caclmgrd')
self.caclmgrd = load_module_from_source('caclmgrd', caclmgrd_path)
@parameterized.expand(CACLMGRD_BFD_TEST_VECTOR)
@patchfs
def test_feature_present(self, test_name, test_data, fs):
if not os.path.exists(DBCONFIG_PATH):
fs.create_file(DBCONFIG_PATH) # fake database_config.json
MockConfigDb.set_config_db(test_data["config_db"])
with mock.patch("caclmgrd.subprocess") as mocked_subprocess:
popen_mock = mock.Mock()
popen_attrs = test_data["popen_attributes"]
popen_mock.configure_mock(**popen_attrs)
mocked_subprocess.Popen.return_value = popen_mock
mocked_subprocess.PIPE = -1
call_rc = test_data["call_rc"]
mocked_subprocess.call.return_value = call_rc
caclmgrd_daemon = self.caclmgrd.ControlPlaneAclManager("caclmgrd")
caclmgrd_daemon.update_feature_present()
self.assertTrue("bgp" in caclmgrd_daemon.feature_present)
self.assertEqual(caclmgrd_daemon.feature_present["bgp"], True)

View File

@@ -1,35 +0,0 @@
from unittest.mock import call
import subprocess
"""
caclmgrd bfd test vector
"""
CACLMGRD_BFD_TEST_VECTOR = [
[
"BFD_SESSION_TEST",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
"bgp": {
"auto_restart": "enabled",
"state": "enabled",
}
},
},
"expected_subprocess_calls": [
call("iptables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT", shell=True, universal_newlines=True, stdout=subprocess.PIPE),
call("ip6tables -I INPUT 2 -p udp -m multiport --dports 3784,4784 -j ACCEPT", shell=True, universal_newlines=True, stdout=subprocess.PIPE)
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 0,
}
]
]

View File

@@ -1,340 +0,0 @@
from unittest.mock import call
"""
caclmgrd dhcp test vector
"""
CACLMGRD_DHCP_TEST_VECTOR = [
[
"Active_Present_Interface",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "active"}),
("Ethernet8", {"state": "active"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --delete DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
call("iptables --delete DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 0,
"mark": None,
},
],
[
"Active_Present_Mark",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "active"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True),
call("iptables --delete DHCP -m mark --mark 0x67004 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 0,
"mark": "0x67004",
},
],
[
"Active_Absent_Interface",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "active"}),
("Ethernet8", {"state": "active"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 1,
"mark": None,
},
],
[
"Active_Absent_Mark",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "active"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 1,
"mark": "0x67004",
},
],
[
"Standby_Present_Interface",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "standby"}),
("Ethernet8", {"state": "standby"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 0,
"mark": None,
},
],
[
"Standby_Present_Mark",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "standby"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 0,
"mark": "0x67004",
},
],
[
"Standby_Absent_Interface",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "standby"}),
("Ethernet8", {"state": "standby"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --insert DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
call("iptables --insert DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 1,
"mark": None,
},
],
[
"Standby_Absent_Mark",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "standby"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True),
call("iptables --insert DHCP -m mark --mark 0x67004 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 1,
"mark": "0x67004",
},
],
[
"Unknown_Present_Interface",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "unknown"}),
("Ethernet8", {"state": "unknown"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --delete DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
call("iptables --delete DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 0,
"mark": None,
},
],
[
"Unknown_Present_Mark",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "unknown"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True),
call("iptables --delete DHCP -m mark --mark 0x67004 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 0,
"mark": "0x67004",
},
],
[
"Uknown_Absent_Interface",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "unknown"}),
("Ethernet8", {"state": "unknown"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m physdev --physdev-in Ethernet4 -j DROP", shell=True),
call("iptables --check DHCP -m physdev --physdev-in Ethernet8 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 1,
"mark": None,
},
],
[
"Uknown_Absent_Mark",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"FEATURE": {
},
},
"mux_update": [
("Ethernet4", {"state": "unknown"}),
],
"expected_subprocess_calls": [
call("iptables --check DHCP -m mark --mark 0x67004 -j DROP", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error'),
},
"call_rc": 1,
"mark": "0x67004",
},
],
]

View File

@@ -1,58 +0,0 @@
class MockConfigDb(object):
"""
Mock Config DB which responds to data tables requests and store updates to the data table
"""
STATE_DB = None
CONFIG_DB = None
event_queue = []
def __init__(self, **kwargs):
self.handlers = {}
@staticmethod
def set_config_db(test_config_db):
MockConfigDb.CONFIG_DB = test_config_db
@staticmethod
def deserialize_key(key, separator="|"):
tokens = key.split(separator)
if len(tokens) > 1:
return tuple(tokens)
else:
return key
@staticmethod
def get_config_db():
return MockConfigDb.CONFIG_DB
def connect(self, wait_for_init=True, retry_on=True):
pass
def get(self, db_id, key, field):
return MockConfigDb.CONFIG_DB[key][field]
def get_entry(self, key, field):
return MockConfigDb.CONFIG_DB[key][field]
def mod_entry(self, key, field, data):
existing_data = self.get_entry(key, field)
existing_data.update(data)
self.set_entry(key, field, existing_data)
def set_entry(self, key, field, data):
MockConfigDb.CONFIG_DB[key][field] = data
def get_table(self, table_name):
return MockConfigDb.CONFIG_DB[table_name]
def subscribe(self, table_name, callback):
self.handlers[table_name] = callback
def listen(self, init_data_handler=None):
for e in MockConfigDb.event_queue:
self.handlers[e[0]](e[0], e[1], self.get_entry(e[0], e[1]))
class MockDBConnector():
def __init__(self, db, val):
pass
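A brief hedged sketch of roughly how the hostcfgd and caclmgrd tests further down in this diff exercise the MockConfigDb class defined above: seed the config, subscribe a handler, then replay event_queue through listen(). The table contents and handler below are illustrative and assume the class definition above.
# Illustrative usage of MockConfigDb (table data is hypothetical).
MockConfigDb.set_config_db({"FEATURE": {"telemetry": {"state": "enabled"}}})
MockConfigDb.event_queue = [("FEATURE", "telemetry")]

def on_feature_event(table, key, data):
    print("event:", table, key, data)

db = MockConfigDb()
db.subscribe("FEATURE", on_feature_event)
db.listen()   # replays each queued (table, key) through the subscribed handler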

View File

@@ -1,119 +0,0 @@
import sys
import os
import pytest
from swsscommon import swsscommon
from sonic_py_common.general import load_module_from_source
# TODO: Remove this if/else block once we no longer support Python 2
if sys.version_info.major == 3:
from unittest import mock
else:
# Expect the 'mock' package for python 2
# https://pypi.python.org/pypi/mock
import mock
# TODO: Remove this if/else block once we no longer support Python 2
if sys.version_info.major == 3:
BUILTINS = "builtins"
else:
BUILTINS = "__builtin__"
from .mock_connector import MockConnector
swsscommon.SonicV2Connector = MockConnector
test_path = os.path.dirname(os.path.abspath(__file__))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, modules_path)
# Load the file under test
determine_reboot_cause_path = os.path.join(scripts_path, 'determine-reboot-cause')
determine_reboot_cause = load_module_from_source('determine_reboot_cause', determine_reboot_cause_path)
PROC_CMDLINE_CONTENTS = """\
BOOT_IMAGE=/image-20191130.52/boot/vmlinuz-4.9.0-11-2-amd64 root=/dev/sda4 rw console=tty0 console=ttyS1,9600n8 quiet net.ifnames=0 biosdevname=0 loop=image-20191130.52/fs.squashfs loopfstype=squashfs apparmor=1 security=apparmor varlog_size=4096 usbcore.autosuspend=-1 module_blacklist=gpio_ich SONIC_BOOT_TYPE=warm"""
EXPECTED_PARSE_WARMFAST_REBOOT_FROM_PROC_CMDLINE = "warm"
PROC_CMDLINE_CONTENTS = """\
BOOT_IMAGE=/image-20191130.52/boot/vmlinuz-4.9.0-11-2-amd64 root=/dev/sda4 rw console=tty0 console=ttyS1,9600n8 quiet net.ifnames=0 biosdevname=0 loop=image-20191130.52/fs.squashfs loopfstype=squashfs apparmor=1 security=apparmor varlog_size=4096 usbcore.autosuspend=-1 module_blacklist=gpio_ich SONIC_BOOT_TYPE=warm"""
REBOOT_CAUSE_CONTENTS = """\
User issued 'warm-reboot' command [User: admin, Time: Mon Nov 2 22:37:45 UTC 2020]"""
GET_SONIC_VERSION_INFO = {'commit_id': 'e59ec8291', 'build_date': 'Mon Nov 2 06:00:14 UTC 2020', 'build_number': 75, 'kernel_version': '4.9.0-11-2-amd64', 'debian_version': '9.13', 'built_by': 'sonicbld@jenkins-slave-phx-2', 'asic_type': 'mellanox', 'build_version': '20191130.52'}
REBOOT_CAUSE_WATCHDOG = "Watchdog"
GEN_TIME_WATCHDOG = "2020_10_22_03_15_08"
REBOOT_CAUSE_USER = "User issued 'reboot' command [User: admin, Time: Thu Oct 22 03:11:08 UTC 2020]"
GEN_TIME_USER = "2020_10_22_03_14_07"
REBOOT_CAUSE_KERNEL_PANIC = "Kernel Panic [Time: Sun Mar 28 13:45:12 UTC 2021]"
GEN_TIME_KERNEL_PANIC = "2021_3_28_13_48_49"
EXPECTED_PARSE_WARMFAST_REBOOT_FROM_PROC_CMDLINE = "warm-reboot"
EXPECTED_FIND_SOFTWARE_REBOOT_CAUSE_USER = "User issued 'warm-reboot' command [User: admin, Time: Mon Nov 2 22:37:45 UTC 2020]"
EXPECTED_FIND_FIRSTBOOT_VERSION = " (First boot of SONiC version 20191130.52)"
EXPECTED_FIND_SOFTWARE_REBOOT_CAUSE_FIRSTBOOT = "Unknown (First boot of SONiC version 20191130.52)"
EXPECTED_HARDWARE_REBOOT_CAUSE = {"warm-reboot", ""}
EXPECTED_WATCHDOG_REBOOT_CAUSE_DICT = {'comment': '', 'gen_time': '2020_10_22_03_15_08', 'cause': 'Watchdog', 'user': 'N/A', 'time': 'N/A'}
EXPECTED_USER_REBOOT_CAUSE_DICT = {'comment': '', 'gen_time': '2020_10_22_03_14_07', 'cause': 'reboot', 'user': 'admin', 'time': 'Thu Oct 22 03:11:08 UTC 2020'}
EXPECTED_KERNEL_PANIC_REBOOT_CAUSE_DICT = {'comment': '', 'gen_time': '2021_3_28_13_48_49', 'cause': 'Kernel Panic', 'user': 'N/A', 'time': 'Sun Mar 28 13:45:12 UTC 2021'}
class TestDetermineRebootCause(object):
def test_parse_warmfast_reboot_from_proc_cmdline(self):
with mock.patch("os.path.isfile") as mock_isfile:
mock_isfile.return_value = True
open_mocked = mock.mock_open(read_data=PROC_CMDLINE_CONTENTS)
with mock.patch("{}.open".format(BUILTINS), open_mocked):
result = determine_reboot_cause.parse_warmfast_reboot_from_proc_cmdline()
assert result == EXPECTED_PARSE_WARMFAST_REBOOT_FROM_PROC_CMDLINE
open_mocked.assert_called_once_with("/proc/cmdline")
def test_find_software_reboot_cause_user(self):
with mock.patch("os.path.isfile") as mock_isfile:
mock_isfile.return_value = True
open_mocked = mock.mock_open(read_data=REBOOT_CAUSE_CONTENTS)
with mock.patch("{}.open".format(BUILTINS), open_mocked):
result = determine_reboot_cause.find_software_reboot_cause_from_reboot_cause_file()
assert result == EXPECTED_FIND_SOFTWARE_REBOOT_CAUSE_USER
open_mocked.assert_called_once_with("/host/reboot-cause/reboot-cause.txt")
def test_find_software_reboot_cause_first_boot(self):
with mock.patch("sonic_py_common.device_info.get_sonic_version_info", return_value=GET_SONIC_VERSION_INFO):
result = determine_reboot_cause.find_first_boot_version()
assert result == EXPECTED_FIND_FIRSTBOOT_VERSION
def test_find_software_reboot_cause(self):
with mock.patch("determine_reboot_cause.find_software_reboot_cause_from_reboot_cause_file", return_value="Unknown"):
with mock.patch("os.path.isfile") as mock_isfile:
mock_isfile.return_value = False
result = determine_reboot_cause.find_software_reboot_cause()
assert result == "Unknown"
def test_find_proc_cmdline_reboot_cause(self):
with mock.patch("determine_reboot_cause.parse_warmfast_reboot_from_proc_cmdline", return_value="fast-reboot"):
result = determine_reboot_cause.find_proc_cmdline_reboot_cause()
assert result == "fast-reboot"
def test_find_hardware_reboot_cause(self):
with mock.patch("determine_reboot_cause.get_reboot_cause_from_platform", return_value=("Powerloss", None)):
result = determine_reboot_cause.find_hardware_reboot_cause()
assert result == "Powerloss (None)"
def test_get_reboot_cause_dict_watchdog(self):
reboot_cause_dict = determine_reboot_cause.get_reboot_cause_dict(REBOOT_CAUSE_WATCHDOG, "", GEN_TIME_WATCHDOG)
assert reboot_cause_dict == EXPECTED_WATCHDOG_REBOOT_CAUSE_DICT
def test_get_reboot_cause_dict_user(self):
reboot_cause_dict = determine_reboot_cause.get_reboot_cause_dict(REBOOT_CAUSE_USER, "", GEN_TIME_USER)
assert reboot_cause_dict == EXPECTED_USER_REBOOT_CAUSE_DICT
def test_get_reboot_cause_dict_kernel_panic(self):
reboot_cause_dict = determine_reboot_cause.get_reboot_cause_dict(REBOOT_CAUSE_KERNEL_PANIC, "", GEN_TIME_KERNEL_PANIC)
assert reboot_cause_dict == EXPECTED_KERNEL_PANIC_REBOOT_CAUSE_DICT

View File

@@ -1,182 +0,0 @@
import importlib.machinery
import importlib.util
import filecmp
import shutil
import os
import sys
import subprocess
import re
import syslog
from parameterized import parameterized
from unittest import TestCase, mock
from tests.hostcfgd.test_passwh_vectors import HOSTCFGD_TEST_PASSWH_VECTOR
from tests.common.mock_configdb import MockConfigDb, MockDBConnector
test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
src_path = os.path.dirname(modules_path)
templates_path = os.path.join(src_path, "sonic-host-services-data/templates")
output_path = os.path.join(test_path, "hostcfgd/output")
sample_output_path = os.path.join(test_path, "hostcfgd/sample_output")
sys.path.insert(0, modules_path)
# Load the file under test
hostcfgd_path = os.path.join(scripts_path, 'hostcfgd')
loader = importlib.machinery.SourceFileLoader('hostcfgd', hostcfgd_path)
spec = importlib.util.spec_from_loader(loader.name, loader)
hostcfgd = importlib.util.module_from_spec(spec)
loader.exec_module(hostcfgd)
sys.modules['hostcfgd'] = hostcfgd
# Mock swsscommon classes
hostcfgd.ConfigDBConnector = MockConfigDb
hostcfgd.DBConnector = MockDBConnector
hostcfgd.Table = mock.Mock()
AGE_DICT = { 'MAX_DAYS': {'REGEX_DAYS': r'^PASS_MAX_DAYS[ \t]*(?P<max_days>\d*)', 'DAYS': 'max_days', 'CHAGE_FLAG': '-M '},
'WARN_DAYS': {'REGEX_DAYS': r'^PASS_WARN_AGE[ \t]*(?P<warn_days>\d*)', 'DAYS': 'warn_days', 'CHAGE_FLAG': '-W '}
}
class TestHostcfgdPASSWH(TestCase):
"""
Test hostcfgd daemon - PASSWH
"""
def run_diff(self, file1, file2):
try:
diff_out = subprocess.check_output('diff -ur {} {} || true'.format(file1, file2), shell=True)
return diff_out
except subprocess.CalledProcessError as err:
syslog.syslog(syslog.LOG_ERR, "{} - failed: return code - {}, output:\n{}".format(err.cmd, err.returncode, err.output))
return -1
def get_passw_days(self, login_file, age_type):
days_num = -1
regex_days = AGE_DICT[age_type]['REGEX_DAYS']
days_type = AGE_DICT[age_type]['DAYS']
with open(login_file, 'r') as f:
login_def_data = f.readlines()
for line in login_def_data:
m1 = re.match(regex_days, line)
if m1:
days_num = int(m1.group(days_type))
break
return days_num
"""
Check different config
"""
def check_config(self, test_name, test_data, config_name):
t_path = templates_path
op_path = output_path + "/" + test_name + "_" + config_name
sop_path = sample_output_path + "/" + test_name + "_" + config_name
sop_path_common = sample_output_path + "/" + test_name
hostcfgd.PAM_PASSWORD_CONF_TEMPLATE = t_path + "/common-password.j2"
hostcfgd.PAM_AUTH_CONF_TEMPLATE = t_path + "/common-auth-sonic.j2"
hostcfgd.NSS_TACPLUS_CONF_TEMPLATE = t_path + "/tacplus_nss.conf.j2"
hostcfgd.NSS_RADIUS_CONF_TEMPLATE = t_path + "/radius_nss.conf.j2"
hostcfgd.PAM_RADIUS_AUTH_CONF_TEMPLATE = t_path + "/pam_radius_auth.conf.j2"
hostcfgd.PAM_PASSWORD_CONF = op_path + "/common-password"
hostcfgd.ETC_LOGIN_DEF = op_path + "/login.defs"
hostcfgd.PAM_AUTH_CONF = op_path + "/common-auth-sonic"
hostcfgd.NSS_TACPLUS_CONF = op_path + "/tacplus_nss.conf"
hostcfgd.NSS_RADIUS_CONF = op_path + "/radius_nss.conf"
hostcfgd.NSS_CONF = op_path + "/nsswitch.conf"
hostcfgd.ETC_PAMD_SSHD = op_path + "/sshd"
hostcfgd.ETC_PAMD_LOGIN = op_path + "/login"
hostcfgd.RADIUS_PAM_AUTH_CONF_DIR = op_path + "/"
shutil.rmtree(op_path, ignore_errors=True)
os.mkdir(op_path)
shutil.copyfile(sop_path_common + "/login.defs.old", op_path + "/login.defs")
MockConfigDb.set_config_db(test_data[config_name])
host_config_daemon = hostcfgd.HostConfigDaemon()
try:
passwh_table = host_config_daemon.config_db.get_table('PASSW_HARDENING')
except Exception as e:
syslog.syslog(syslog.LOG_ERR, "failed: get_table 'PASSW_HARDENING', exception={}".format(e))
passwh_table = []
host_config_daemon.passwcfg.load(passwh_table)
diff_output = ""
files_to_compare = ['common-password']
# check output files exists
for name in files_to_compare:
if not os.path.isfile(sop_path + "/" + name):
raise ValueError('filename: %s does not exist' % (sop_path + "/" + name))
if not os.path.isfile(op_path + "/" + name):
raise ValueError('filename: %s does not exist' % (op_path + "/" + name))
# deep comparison
match, mismatch, errors = filecmp.cmpfiles(sop_path, op_path, files_to_compare, shallow=False)
if not match:
for name in files_to_compare:
diff_output += self.run_diff( sop_path + "/" + name,\
op_path + "/" + name).decode('utf-8')
self.assertTrue(len(diff_output) == 0, diff_output)
# compare age data in login.def file.
out_passw_age_days = self.get_passw_days(op_path + "/login.defs", 'MAX_DAYS')
sout_passw_age_days = self.get_passw_days(sop_path + "/login.defs", 'MAX_DAYS')
out_passw_age_warn_days = self.get_passw_days(op_path + "/login.defs", 'WARN_DAYS')
sout_passw_age_warn_days = self.get_passw_days(sop_path + "/login.defs", 'WARN_DAYS')
self.assertEqual(out_passw_age_days, sout_passw_age_days)
self.assertEqual(out_passw_age_warn_days, sout_passw_age_warn_days)
@parameterized.expand(HOSTCFGD_TEST_PASSWH_VECTOR)
def test_hostcfgd_passwh(self, test_name, test_data):
"""
Test PASSWH hostcfgd daemon initialization
Args:
test_name(str): test name
test_data(dict): test data which contains initial Config Db tables, and expected results
Returns:
None
"""
self.check_config(test_name, test_data, "default_values")
@parameterized.expand(HOSTCFGD_TEST_PASSWH_VECTOR)
def test_hostcfgd_passwh_enable(self, test_name, test_data):
"""
Test PASSWH hostcfgd daemon initialization
Args:
test_name(str): test name
test_data(dict): test data which contains initial Config Db tables, and expected results
Returns:
None
"""
self.check_config(test_name, test_data, "enable_feature")
@parameterized.expand(HOSTCFGD_TEST_PASSWH_VECTOR)
def test_hostcfgd_passwh_classes(self, test_name, test_data):
"""
Test PASSWH hostcfgd daemon initialization
Args:
test_name(str): test name
test_data(dict): test data which contains initial Config Db tables, and expected results
Returns:
None
"""
self.check_config(test_name, test_data, "enable_digits_class")

View File

@@ -1,103 +0,0 @@
import importlib.machinery
import importlib.util
import filecmp
import shutil
import os
import sys
import subprocess
from swsscommon import swsscommon
from parameterized import parameterized
from unittest import TestCase, mock
from tests.hostcfgd.test_radius_vectors import HOSTCFGD_TEST_RADIUS_VECTOR
from tests.common.mock_configdb import MockConfigDb, MockDBConnector
test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
src_path = os.path.dirname(modules_path)
templates_path = os.path.join(src_path, "sonic-host-services-data/templates")
output_path = os.path.join(test_path, "hostcfgd/output")
sample_output_path = os.path.join(test_path, "hostcfgd/sample_output")
sys.path.insert(0, modules_path)
# Load the file under test
hostcfgd_path = os.path.join(scripts_path, 'hostcfgd')
loader = importlib.machinery.SourceFileLoader('hostcfgd', hostcfgd_path)
spec = importlib.util.spec_from_loader(loader.name, loader)
hostcfgd = importlib.util.module_from_spec(spec)
loader.exec_module(hostcfgd)
sys.modules['hostcfgd'] = hostcfgd
# Mock swsscommon classes
hostcfgd.ConfigDBConnector = MockConfigDb
hostcfgd.DBConnector = MockDBConnector
hostcfgd.Table = mock.Mock()
class TestHostcfgdRADIUS(TestCase):
"""
Test hostcfgd daemon - RADIUS
"""
def run_diff(self, file1, file2):
return subprocess.check_output('diff -uR {} {} || true'.format(file1, file2), shell=True)
@parameterized.expand(HOSTCFGD_TEST_RADIUS_VECTOR)
def test_hostcfgd_radius(self, test_name, test_data):
"""
Test RADIUS hostcfgd daemon initialization
Args:
test_name(str): test name
test_data(dict): test data which contains initial Config Db tables, and expected results
Returns:
None
"""
t_path = templates_path
op_path = output_path + "/" + test_name
sop_path = sample_output_path + "/" + test_name
hostcfgd.PAM_AUTH_CONF_TEMPLATE = t_path + "/common-auth-sonic.j2"
hostcfgd.NSS_TACPLUS_CONF_TEMPLATE = t_path + "/tacplus_nss.conf.j2"
hostcfgd.NSS_RADIUS_CONF_TEMPLATE = t_path + "/radius_nss.conf.j2"
hostcfgd.PAM_RADIUS_AUTH_CONF_TEMPLATE = t_path + "/pam_radius_auth.conf.j2"
hostcfgd.PAM_AUTH_CONF = op_path + "/common-auth-sonic"
hostcfgd.NSS_TACPLUS_CONF = op_path + "/tacplus_nss.conf"
hostcfgd.NSS_RADIUS_CONF = op_path + "/radius_nss.conf"
hostcfgd.NSS_CONF = op_path + "/nsswitch.conf"
hostcfgd.ETC_PAMD_SSHD = op_path + "/sshd"
hostcfgd.ETC_PAMD_LOGIN = op_path + "/login"
hostcfgd.RADIUS_PAM_AUTH_CONF_DIR = op_path + "/"
shutil.rmtree( op_path, ignore_errors=True)
os.mkdir( op_path)
shutil.copyfile( sop_path + "/sshd.old", op_path + "/sshd")
shutil.copyfile( sop_path + "/login.old", op_path + "/login")
MockConfigDb.set_config_db(test_data["config_db"])
host_config_daemon = hostcfgd.HostConfigDaemon()
aaa = host_config_daemon.config_db.get_table('AAA')
try:
radius_global = host_config_daemon.config_db.get_table('RADIUS')
except:
radius_global = []
try:
radius_server = \
host_config_daemon.config_db.get_table('RADIUS_SERVER')
except:
radius_server = []
host_config_daemon.aaacfg.load(aaa,[],[],radius_global,radius_server)
dcmp = filecmp.dircmp(sop_path, op_path)
diff_output = ""
for name in dcmp.diff_files:
diff_output += \
"Diff: file: {} expected: {} output: {}\n".format(\
name, dcmp.left, dcmp.right)
diff_output += self.run_diff( dcmp.left + "/" + name,\
dcmp.right + "/" + name)
self.assertTrue(len(diff_output) == 0, diff_output)

View File

@@ -1,116 +0,0 @@
import importlib.machinery
import importlib.util
import filecmp
import shutil
import os
import sys
import subprocess
from swsscommon import swsscommon
from parameterized import parameterized
from unittest import TestCase, mock
from tests.hostcfgd.test_tacacs_vectors import HOSTCFGD_TEST_TACACS_VECTOR
from tests.common.mock_configdb import MockConfigDb, MockDBConnector
test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
src_path = os.path.dirname(modules_path)
templates_path = os.path.join(src_path, "sonic-host-services-data/templates")
output_path = os.path.join(test_path, "hostcfgd/output")
sample_output_path = os.path.join(test_path, "hostcfgd/sample_output")
sys.path.insert(0, modules_path)
# Load the file under test
hostcfgd_path = os.path.join(scripts_path, 'hostcfgd')
loader = importlib.machinery.SourceFileLoader('hostcfgd', hostcfgd_path)
spec = importlib.util.spec_from_loader(loader.name, loader)
hostcfgd = importlib.util.module_from_spec(spec)
loader.exec_module(hostcfgd)
sys.modules['hostcfgd'] = hostcfgd
# Mock swsscommon classes
hostcfgd.ConfigDBConnector = MockConfigDb
hostcfgd.DBConnector = MockDBConnector
hostcfgd.Table = mock.Mock()
class TestHostcfgdTACACS(TestCase):
"""
Test hostcfgd daemon - TACACS
"""
def run_diff(self, file1, file2):
return subprocess.check_output('diff -uR {} {} || true'.format(file1, file2), shell=True)
"""
Check different config
"""
def check_config(self, test_name, test_data, config_name):
t_path = templates_path
op_path = output_path + "/" + test_name + "_" + config_name
sop_path = sample_output_path + "/" + test_name + "_" + config_name
hostcfgd.PAM_AUTH_CONF_TEMPLATE = t_path + "/common-auth-sonic.j2"
hostcfgd.NSS_TACPLUS_CONF_TEMPLATE = t_path + "/tacplus_nss.conf.j2"
hostcfgd.NSS_RADIUS_CONF_TEMPLATE = t_path + "/radius_nss.conf.j2"
hostcfgd.PAM_RADIUS_AUTH_CONF_TEMPLATE = t_path + "/pam_radius_auth.conf.j2"
hostcfgd.PAM_AUTH_CONF = op_path + "/common-auth-sonic"
hostcfgd.NSS_TACPLUS_CONF = op_path + "/tacplus_nss.conf"
hostcfgd.NSS_RADIUS_CONF = op_path + "/radius_nss.conf"
hostcfgd.NSS_CONF = op_path + "/nsswitch.conf"
hostcfgd.ETC_PAMD_SSHD = op_path + "/sshd"
hostcfgd.ETC_PAMD_LOGIN = op_path + "/login"
hostcfgd.RADIUS_PAM_AUTH_CONF_DIR = op_path + "/"
shutil.rmtree( op_path, ignore_errors=True)
os.mkdir( op_path)
shutil.copyfile( sop_path + "/sshd.old", op_path + "/sshd")
shutil.copyfile( sop_path + "/login.old", op_path + "/login")
MockConfigDb.set_config_db(test_data[config_name])
host_config_daemon = hostcfgd.HostConfigDaemon()
aaa = host_config_daemon.config_db.get_table('AAA')
try:
tacacs_global = host_config_daemon.config_db.get_table('TACPLUS')
except:
tacacs_global = []
try:
tacacs_server = \
host_config_daemon.config_db.get_table('TACPLUS_SERVER')
except:
tacacs_server = []
host_config_daemon.aaacfg.load(aaa,tacacs_global,tacacs_server,[],[])
dcmp = filecmp.dircmp(sop_path, op_path)
diff_output = ""
for name in dcmp.diff_files:
diff_output += \
"Diff: file: {} expected: {} output: {}\n".format(\
name, dcmp.left, dcmp.right)
diff_output += self.run_diff( dcmp.left + "/" + name,\
dcmp.right + "/" + name)
self.assertTrue(len(diff_output) == 0, diff_output)
@parameterized.expand(HOSTCFGD_TEST_TACACS_VECTOR)
def test_hostcfgd_tacacs(self, test_name, test_data):
"""
Test TACACS hostcfgd daemon initialization
Args:
test_name(str): test name
test_data(dict): test data which contains initial Config Db tables, and expected results
Returns:
None
"""
# test local config
self.check_config(test_name, test_data, "config_db_local")
# test remote config
self.check_config(test_name, test_data, "config_db_tacacs")
# test local + tacacs config
self.check_config(test_name, test_data, "config_db_local_and_tacacs")
# test disable accounting
self.check_config(test_name, test_data, "config_db_disable_accounting")

View File

@@ -1,359 +0,0 @@
import os
import sys
import swsscommon as swsscommon_package
from swsscommon import swsscommon
from parameterized import parameterized
from sonic_py_common.general import load_module_from_source
from unittest import TestCase, mock
from .test_vectors import HOSTCFGD_TEST_VECTOR, HOSTCFG_DAEMON_CFG_DB
from tests.common.mock_configdb import MockConfigDb, MockDBConnector
from pyfakefs.fake_filesystem_unittest import patchfs
from deepdiff import DeepDiff
from unittest.mock import call
test_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, 'scripts')
sys.path.insert(0, modules_path)
# Load the file under test
hostcfgd_path = os.path.join(scripts_path, 'hostcfgd')
hostcfgd = load_module_from_source('hostcfgd', hostcfgd_path)
hostcfgd.ConfigDBConnector = MockConfigDb
hostcfgd.DBConnector = MockDBConnector
hostcfgd.Table = mock.Mock()
class TestFeatureHandler(TestCase):
"""Test methods of `FeatureHandler` class.
"""
def checks_config_table(self, feature_table, expected_table):
"""Compares `FEATURE` table in `CONFIG_DB` with expected output table.
Args:
feature_table: A dictionary indicating the current `FEATURE` table in `CONFIG_DB`.
expected_table: A dictionary indicating the expected `FEATURE` table in `CONFIG_DB`.
Returns:
Returns True if `FEATURE` table in `CONFIG_DB` was not modified unexpectedly;
otherwise, returns False.
"""
ddiff = DeepDiff(feature_table, expected_table, ignore_order=True)
return True if not ddiff else False
def checks_systemd_config_file(self, feature_table):
"""Checks whether the systemd configuration file of each feature was created or not
and whether the `Restart=` field in the file is set correctly or not.
Args:
feature_table: A dictionary indicates `Feature` table in `CONFIG_DB`.
Returns: Boolean value indicates whether test passed or not.
"""
truth_table = {'enabled': 'always',
'disabled': 'no'}
systemd_config_file_path = os.path.join(hostcfgd.FeatureHandler.SYSTEMD_SERVICE_CONF_DIR,
'auto_restart.conf')
for feature_name in feature_table:
auto_restart_status = feature_table[feature_name].get('auto_restart', 'disabled')
if "enabled" in auto_restart_status:
auto_restart_status = "enabled"
elif "disabled" in auto_restart_status:
auto_restart_status = "disabled"
feature_systemd_config_file_path = systemd_config_file_path.format(feature_name)
is_config_file_existing = os.path.exists(feature_systemd_config_file_path)
assert is_config_file_existing, "Systemd configuration file of feature '{}' does not exist!".format(feature_name)
with open(feature_systemd_config_file_path) as systemd_config_file:
status = systemd_config_file.read().strip()
assert status == '[Service]\nRestart={}'.format(truth_table[auto_restart_status])
def get_state_db_set_calls(self, feature_table):
"""Returns a Mock call objects which recorded the `set` calls to `FEATURE` table in `STATE_DB`.
Args:
feature_table: A dictionary indicates `FEATURE` table in `CONFIG_DB`.
Returns:
set_call_list: A list indicates Mock call objects.
"""
set_call_list = []
for feature_name in feature_table.keys():
feature_state = ""
if "enabled" in feature_table[feature_name]["state"]:
feature_state = "enabled"
elif "disabled" in feature_table[feature_name]["state"]:
feature_state = "disabled"
else:
feature_state = feature_table[feature_name]["state"]
set_call_list.append(mock.call(feature_name, [("state", feature_state)]))
return set_call_list
@parameterized.expand(HOSTCFGD_TEST_VECTOR)
@patchfs
def test_sync_state_field(self, test_scenario_name, config_data, fs):
"""Tests the method `sync_state_field(...)` of `FeatureHandler` class.
Args:
test_scenario_name: A string indicating the testing scenario.
config_data: A dictionary contains initial `CONFIG_DB` tables and expected results.
Returns:
Boolean value indicates whether test will pass or not.
"""
# add real path of swsscommon for database_config.json
fs.add_real_paths(swsscommon_package.__path__)
fs.create_dir(hostcfgd.FeatureHandler.SYSTEMD_SYSTEM_DIR)
MockConfigDb.set_config_db(config_data['config_db'])
feature_state_table_mock = mock.Mock()
with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
popen_mock = mock.Mock()
attrs = config_data['popen_attributes']
popen_mock.configure_mock(**attrs)
mocked_subprocess.Popen.return_value = popen_mock
device_config = {}
device_config['DEVICE_METADATA'] = MockConfigDb.CONFIG_DB['DEVICE_METADATA']
feature_handler = hostcfgd.FeatureHandler(MockConfigDb(), feature_state_table_mock, device_config)
feature_table = MockConfigDb.CONFIG_DB['FEATURE']
feature_handler.sync_state_field(feature_table)
is_any_difference = self.checks_config_table(MockConfigDb.get_config_db()['FEATURE'],
config_data['expected_config_db']['FEATURE'])
assert is_any_difference, "'FEATURE' table in 'CONFIG_DB' is modified unexpectedly!"
feature_table_state_db_calls = self.get_state_db_set_calls(feature_table)
self.checks_systemd_config_file(config_data['config_db']['FEATURE'])
mocked_subprocess.check_call.assert_has_calls(config_data['enable_feature_subprocess_calls'],
any_order=True)
mocked_subprocess.check_call.assert_has_calls(config_data['daemon_reload_subprocess_call'],
any_order=True)
feature_state_table_mock.set.assert_has_calls(feature_table_state_db_calls)
self.checks_systemd_config_file(config_data['config_db']['FEATURE'])
@parameterized.expand(HOSTCFGD_TEST_VECTOR)
@patchfs
def test_handler(self, test_scenario_name, config_data, fs):
"""Tests the method `handle(...)` of `FeatureHandler` class.
Args:
test_scenario_name: A string indicating the testing scenario.
config_data: A dictionary contains initial `CONFIG_DB` tables and expected results.
Returns:
Boolean value indicates whether test will pass or not.
"""
# add real path of swsscommon for database_config.json
fs.add_real_paths(swsscommon_package.__path__)
fs.create_dir(hostcfgd.FeatureHandler.SYSTEMD_SYSTEM_DIR)
MockConfigDb.set_config_db(config_data['config_db'])
feature_state_table_mock = mock.Mock()
with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
popen_mock = mock.Mock()
attrs = config_data['popen_attributes']
popen_mock.configure_mock(**attrs)
mocked_subprocess.Popen.return_value = popen_mock
device_config = {}
device_config['DEVICE_METADATA'] = MockConfigDb.CONFIG_DB['DEVICE_METADATA']
feature_handler = hostcfgd.FeatureHandler(MockConfigDb(), feature_state_table_mock, device_config)
feature_table = MockConfigDb.CONFIG_DB['FEATURE']
for feature_name, feature_config in feature_table.items():
feature_handler.handler(feature_name, 'SET', feature_config)
self.checks_systemd_config_file(config_data['config_db']['FEATURE'])
mocked_subprocess.check_call.assert_has_calls(config_data['enable_feature_subprocess_calls'],
any_order=True)
mocked_subprocess.check_call.assert_has_calls(config_data['daemon_reload_subprocess_call'],
any_order=True)
def test_feature_config_parsing(self):
swss_feature = hostcfgd.Feature('swss', {
'state': 'enabled',
'auto_restart': 'enabled',
'has_timer': 'True',
'has_global_scope': 'False',
'has_per_asic_scope': 'True',
})
assert swss_feature.name == 'swss'
assert swss_feature.state == 'enabled'
assert swss_feature.auto_restart == 'enabled'
assert swss_feature.has_timer
assert not swss_feature.has_global_scope
assert swss_feature.has_per_asic_scope
def test_feature_config_parsing_defaults(self):
swss_feature = hostcfgd.Feature('swss', {
'state': 'enabled',
})
assert swss_feature.name == 'swss'
assert swss_feature.state == 'enabled'
assert swss_feature.auto_restart == 'disabled'
assert not swss_feature.has_timer
assert swss_feature.has_global_scope
assert not swss_feature.has_per_asic_scope
class TestNtpCfgd(TestCase):
"""
Test hostcfgd daemon - NtpCfgd
"""
def setUp(self):
        MockConfigDb.CONFIG_DB['NTP'] = {'global': {'vrf': 'mgmt', 'src_intf': 'eth0'}}
        MockConfigDb.CONFIG_DB['NTP_SERVER'] = {'0.debian.pool.ntp.org': {}}

    def tearDown(self):
        MockConfigDb.CONFIG_DB = {}

    def test_ntp_global_update_with_no_servers(self):
        with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
            popen_mock = mock.Mock()
            attrs = {'communicate.return_value': ('output', 'error')}
            popen_mock.configure_mock(**attrs)
            mocked_subprocess.Popen.return_value = popen_mock

            ntpcfgd = hostcfgd.NtpCfg()
            ntpcfgd.ntp_global_update('global', MockConfigDb.CONFIG_DB['NTP']['global'])

            mocked_subprocess.check_call.assert_not_called()

    def test_ntp_global_update_ntp_servers(self):
        with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
            popen_mock = mock.Mock()
            attrs = {'communicate.return_value': ('output', 'error')}
            popen_mock.configure_mock(**attrs)
            mocked_subprocess.Popen.return_value = popen_mock

            ntpcfgd = hostcfgd.NtpCfg()
            ntpcfgd.ntp_global_update('global', MockConfigDb.CONFIG_DB['NTP']['global'])
            ntpcfgd.ntp_server_update('0.debian.pool.ntp.org', 'SET')

            mocked_subprocess.check_call.assert_has_calls([call('systemctl restart ntp-config', shell=True)])

    def test_loopback_update(self):
        with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
            popen_mock = mock.Mock()
            attrs = {'communicate.return_value': ('output', 'error')}
            popen_mock.configure_mock(**attrs)
            mocked_subprocess.Popen.return_value = popen_mock

            ntpcfgd = hostcfgd.NtpCfg()
            ntpcfgd.ntp_global = MockConfigDb.CONFIG_DB['NTP']['global']
            ntpcfgd.ntp_servers.add('0.debian.pool.ntp.org')

            ntpcfgd.handle_ntp_source_intf_chg('eth0')

            mocked_subprocess.check_call.assert_has_calls([call('systemctl restart ntp-config', shell=True)])


class TestHostcfgdDaemon(TestCase):

    def setUp(self):
        MockConfigDb.set_config_db(HOSTCFG_DAEMON_CFG_DB)

    def tearDown(self):
        MockConfigDb.CONFIG_DB = {}

    @patchfs
    def test_feature_events(self, fs):
        fs.create_dir(hostcfgd.FeatureHandler.SYSTEMD_SYSTEM_DIR)
        MockConfigDb.event_queue = [('FEATURE', 'dhcp_relay'),
                                    ('FEATURE', 'mux'),
                                    ('FEATURE', 'telemetry')]
        daemon = hostcfgd.HostConfigDaemon()
        daemon.register_callbacks()
        with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
            popen_mock = mock.Mock()
            attrs = {'communicate.return_value': ('output', 'error')}
            popen_mock.configure_mock(**attrs)
            mocked_subprocess.Popen.return_value = popen_mock
            try:
                daemon.start()
            except TimeoutError:
                pass
            expected = [call('sudo systemctl daemon-reload', shell=True),
                        call('sudo systemctl unmask dhcp_relay.service', shell=True),
                        call('sudo systemctl enable dhcp_relay.service', shell=True),
                        call('sudo systemctl start dhcp_relay.service', shell=True),
                        call('sudo systemctl daemon-reload', shell=True),
                        call('sudo systemctl unmask mux.service', shell=True),
                        call('sudo systemctl enable mux.service', shell=True),
                        call('sudo systemctl start mux.service', shell=True),
                        call('sudo systemctl daemon-reload', shell=True),
                        call('sudo systemctl unmask telemetry.service', shell=True),
                        call('sudo systemctl unmask telemetry.timer', shell=True),
                        call('sudo systemctl enable telemetry.timer', shell=True),
                        call('sudo systemctl start telemetry.timer', shell=True)]
            mocked_subprocess.check_call.assert_has_calls(expected)

            # Change the state to disabled
            MockConfigDb.CONFIG_DB['FEATURE']['telemetry']['state'] = 'disabled'
            MockConfigDb.event_queue = [('FEATURE', 'telemetry')]
            try:
                daemon.start()
            except TimeoutError:
                pass
            expected = [call('sudo systemctl stop telemetry.timer', shell=True),
                        call('sudo systemctl disable telemetry.timer', shell=True),
                        call('sudo systemctl mask telemetry.timer', shell=True),
                        call('sudo systemctl stop telemetry.service', shell=True),
                        call('sudo systemctl disable telemetry.timer', shell=True),
                        call('sudo systemctl mask telemetry.timer', shell=True)]
            mocked_subprocess.check_call.assert_has_calls(expected)

    def test_loopback_events(self):
        MockConfigDb.set_config_db(HOSTCFG_DAEMON_CFG_DB)
        MockConfigDb.event_queue = [('NTP', 'global'),
                                    ('NTP_SERVER', '0.debian.pool.ntp.org'),
                                    ('LOOPBACK_INTERFACE', 'Loopback0|10.184.8.233/32')]
        daemon = hostcfgd.HostConfigDaemon()
        daemon.register_callbacks()
        with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
            popen_mock = mock.Mock()
            attrs = {'communicate.return_value': ('output', 'error')}
            popen_mock.configure_mock(**attrs)
            mocked_subprocess.Popen.return_value = popen_mock
            try:
                daemon.start()
            except TimeoutError:
                pass
            expected = [call('systemctl restart ntp-config', shell=True),
                        call('iptables -t mangle --append PREROUTING -p tcp --tcp-flags SYN SYN -d 10.184.8.233 -j TCPMSS --set-mss 1460', shell=True),
                        call('iptables -t mangle --append POSTROUTING -p tcp --tcp-flags SYN SYN -s 10.184.8.233 -j TCPMSS --set-mss 1460', shell=True)]
            mocked_subprocess.check_call.assert_has_calls(expected, any_order=True)

    def test_kdump_event(self):
        MockConfigDb.set_config_db(HOSTCFG_DAEMON_CFG_DB)
        daemon = hostcfgd.HostConfigDaemon()
        daemon.register_callbacks()
        MockConfigDb.event_queue = [('KDUMP', 'config')]
        with mock.patch('hostcfgd.subprocess') as mocked_subprocess:
            popen_mock = mock.Mock()
            attrs = {'communicate.return_value': ('output', 'error')}
            popen_mock.configure_mock(**attrs)
            mocked_subprocess.Popen.return_value = popen_mock
            try:
                daemon.start()
            except TimeoutError:
                pass
            expected = [call('sonic-kdump-config --disable', shell=True),
                        call('sonic-kdump-config --num_dumps 3', shell=True),
                        call('sonic-kdump-config --memory 0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M', shell=True)]
            mocked_subprocess.check_call.assert_has_calls(expected, any_order=True)
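
The tests above rely on a small in-memory stand-in for the ConfigDB connector. The removed mock itself is not shown in this diff, so the following is only a sketch inferred from the calls visible above (set_config_db(), CONFIG_DB, event_queue, and the TimeoutError that ends daemon.start()); the method bodies and callback signature are assumptions, not the deleted implementation.

# Minimal sketch of the ConfigDB test double assumed by the tests above.
# Only the members those tests touch are shown; the bodies are assumptions.
class MockConfigDb(object):
    CONFIG_DB = {}       # table name -> key -> field/value dict
    event_queue = []     # (table, key) pairs replayed to the daemon's callbacks

    def __init__(self, **kwargs):
        self.handlers = {}

    @staticmethod
    def set_config_db(config_db):
        MockConfigDb.CONFIG_DB = config_db

    def get_table(self, table):
        return MockConfigDb.CONFIG_DB.get(table, {})

    def subscribe(self, table, callback):
        self.handlers[table] = callback

    def listen(self, init_data_handler=None):
        # Replay the queued events, then raise to unblock daemon.start(),
        # which the tests above swallow as TimeoutError.
        for table, key in MockConfigDb.event_queue:
            self.handlers[table](table, key, MockConfigDb.CONFIG_DB[table].get(key))
        raise TimeoutError

In the deleted sources this double presumably sits under the tests directory and is patched in place of the real swsscommon connector before HostConfigDaemon is constructed; the sketch only mirrors the calls visible above.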

View File

@ -1,4 +0,0 @@
# Ignore all test generated files
*
# But keep this file
!.gitignore

View File

@ -1,21 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-auth- authentication settings common to all services
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok try_first_pass
#
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
auth required pam_permit.so
# and here are more per-package modules (the "Additional" block)
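
The comments above already describe the control flow: on success, pam_unix's success=1 jumps over the pam_deny fallback, and pam_permit then primes a positive return value; on any failure the requisite pam_deny line rejects immediately. A plain-Python paraphrase of that flow, illustrative only and not PAM code:

# Paraphrase of the three auth lines above; 'success=1' means skip the next module.
def common_auth_sonic(pam_unix_result):
    if pam_unix_result == 'success':
        pass                      # success=1: jump over pam_deny
    else:
        return 'denied'           # auth requisite pam_deny.so
    return 'permitted'            # auth required pam_permit.so primes success

assert common_auth_sonic('success') == 'permitted'
assert common_auth_sonic('auth_err') == 'denied'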

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth-sonic
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,56 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2
# RADIUS NSS Configuration File
#
# Debug: on|off|trace
# Default: off
#
# debug=on
debug=on
#
# User Privilege:
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# Eg:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell
#
# many_to_one:
# y: Map RADIUS users to one local user per privilege.
# n: Create local user account on first successful authentication.
# Default: n
#
# Eg:
# many_to_one=y
#
# unconfirmed_disallow:
# y: Do not allow unconfirmed users (users created before authentication)
# n: Allow unconfirmed users.
# Default: n
# Eg:
# unconfirmed_disallow=y
#
# unconfirmed_ageout:
# <seconds>: Wait time before purging unconfirmed users
# Default: 600
#
# Eg:
# unconfirmed_ageout=900
#
# unconfirmed_regexp:
# <regexp>: The RE to match the command line of processes for which the
# creation of unconfirmed users are to be allowed.
# Default: (.*: <user> \[priv\])|(.*: \[accepted\])
# where: <user> is the unconfirmed user.
#
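
The user_priv lines documented above pack a privilege-to-account mapping into one ';'-separated string. The real consumer is the nss_radius plugin (written in C); the Python sketch below only makes the field layout concrete and assumes nothing beyond the format shown in the comments.

# Illustrative parser for the 'user_priv=...' mapping lines documented above.
# Assumption: ';'-separated key=value pairs, with ',' separating groups.
def parse_user_priv(line):
    fields = dict(item.split('=', 1) for item in line.strip().split(';'))
    fields['group'] = fields.get('group', '').split(',')
    return fields

example = 'user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell'
parsed = parse_user_priv(example)
assert parsed['user_priv'] == '15'
assert parsed['group'] == ['sudo', 'docker']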

View File

@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth-sonic
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password

View File

@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password

View File

@ -1,40 +0,0 @@
# Configuration for libnss-tacplus
# debug - If you want to open debug log, set it on
# Default: off
# debug=on
debug=on
# local_accounting - If you want to local accounting, set it
# Default: None
# local_accounting
# tacacs_accounting - If you want to tacacs+ accounting, set it
# Default: None
# tacacs_accounting
# local_authorization - If you want to local authorization, set it
# Default: None
# local_authorization
local_authorization
# tacacs_authorization - If you want to tacacs+ authorization, set it
# Default: None
# tacacs_authorization
# src_ip - set source address of TACACS+ protocol packets
# Default: None (auto source ip address)
# src_ip=2.2.2.2
# server - set ip address, tcp port, secret string and timeout for TACACS+ servers
# Default: None (no TACACS+ server)
# server=1.1.1.1:49,secret=test,timeout=3
# user_priv - set the map between TACACS+ user privilege and local user's passwd
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash
# many_to_one - create one local user for many TACACS+ users which has the same privilege
# Default: many_to_one=n
# many_to_one=y
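
The server= lines documented above bundle address, port, shared secret and timeout into one comma-separated value. Again, the real parser is the C libnss-tacplus code; this is just an illustrative reading of the documented format.

# Illustrative reading of 'server=1.1.1.1:49,secret=test,timeout=3' (documented
# above); not the libnss-tacplus parser.
def parse_tacplus_server(value):
    addr_port, *options = value.split(',')
    host, _, port = addr_port.partition(':')
    server = {'host': host, 'port': int(port or 49)}
    for opt in options:
        key, _, val = opt.partition('=')
        server[key] = val
    return server

parsed = parse_tacplus_server('1.1.1.1:49,secret=test,timeout=3')
assert parsed == {'host': '1.1.1.1', 'port': 49, 'secret': 'test', 'timeout': '3'}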

View File

@ -1,340 +0,0 @@
#
# /etc/login.defs - Configuration control definitions for the login package.
#
# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
# If unspecified, some arbitrary (and possibly incorrect) value will
# be assumed. All other items are optional - if not specified then
# the described action or option will be inhibited.
#
# Comment lines (lines beginning with "#") and blank lines are ignored.
#
# Modified for Linux. --marekm
# REQUIRED for useradd/userdel/usermod
# Directory where mailboxes reside, _or_ name of file, relative to the
# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
# MAIL_DIR takes precedence.
#
# Essentially:
# - MAIL_DIR defines the location of users mail spool files
# (for mbox use) by appending the username to MAIL_DIR as defined
# below.
# - MAIL_FILE defines the location of the users mail spool files as the
# fully-qualified filename obtained by prepending the user home
# directory before $MAIL_FILE
#
# NOTE: This is no more used for setting up users MAIL environment variable
# which is, starting from shadow 4.0.12-1 in Debian, entirely the
# job of the pam_mail PAM modules
# See default PAM configuration files provided for
# login, su, etc.
#
# This is a temporary situation: setting these variables will soon
# move to /etc/default/useradd and the variables will then be
# no more supported
MAIL_DIR /var/mail
#MAIL_FILE .mail
#
# Enable logging and display of /var/log/faillog login failure info.
# This option conflicts with the pam_tally PAM module.
#
FAILLOG_ENAB yes
#
# Enable display of unknown usernames when login failures are recorded.
#
# WARNING: Unknown usernames may become world readable.
# See #290803 and #298773 for details about how this could become a security
# concern
LOG_UNKFAIL_ENAB no
#
# Enable logging of successful logins
#
LOG_OK_LOGINS no
#
# Enable "syslog" logging of su activity - in addition to sulog file logging.
# SYSLOG_SG_ENAB does the same for newgrp and sg.
#
SYSLOG_SU_ENAB yes
SYSLOG_SG_ENAB yes
#
# If defined, all su activity is logged to this file.
#
#SULOG_FILE /var/log/sulog
#
# If defined, file which maps tty line to TERM environment parameter.
# Each line of the file is in a format something like "vt100 tty01".
#
#TTYTYPE_FILE /etc/ttytype
#
# If defined, login failures will be logged here in a utmp format
# last, when invoked as lastb, will read /var/log/btmp, so...
#
FTMP_FILE /var/log/btmp
#
# If defined, the command name to display when running "su -". For
# example, if this is defined as "su" then a "ps" will display the
# command is "-su". If not defined, then "ps" would display the
# name of the shell actually being run, e.g. something like "-sh".
#
SU_NAME su
#
# If defined, file which inhibits all the usual chatter during the login
# sequence. If a full pathname, then hushed mode will be enabled if the
# user's name or shell are found in the file. If not a full pathname, then
# hushed mode will be enabled if the file exists in the user's home directory.
#
HUSHLOGIN_FILE .hushlogin
#HUSHLOGIN_FILE /etc/hushlogins
#
# *REQUIRED* The default PATH settings, for superuser and normal users.
#
# (they are minimal, add the rest in the shell startup files)
ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games
#
# Terminal permissions
#
# TTYGROUP Login tty will be assigned this group ownership.
# TTYPERM Login tty will be set to this permission.
#
# If you have a "write" program which is "setgid" to a special group
# which owns the terminals, define TTYGROUP to the group number and
# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
# TTYPERM to either 622 or 600.
#
# In Debian /usr/bin/bsd-write or similar programs are setgid tty
# However, the default and recommended value for TTYPERM is still 0600
# to not allow anyone to write to anyone else console or terminal
# Users can still allow other people to write them by issuing
# the "mesg y" command.
TTYGROUP tty
TTYPERM 0600
#
# Login configuration initializations:
#
# ERASECHAR Terminal ERASE character ('\010' = backspace).
# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
# UMASK Default "umask" value.
#
# The ERASECHAR and KILLCHAR are used only on System V machines.
#
# UMASK is the default umask value for pam_umask and is used by
# useradd and newusers to set the mode of the new home directories.
# 022 is the "historical" value in Debian for UMASK
# 027, or even 077, could be considered better for privacy
# There is no One True Answer here : each sysadmin must make up his/her
# mind.
#
# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
# for private user groups, i. e. the uid is the same as gid, and username is
# the same as the primary group name: for these, the user permissions will be
# used as group permissions, e. g. 022 will become 002.
#
# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
#
ERASECHAR 0177
KILLCHAR 025
UMASK 022
#
# Password aging controls:
#
# PASS_MAX_DAYS Maximum number of days a password may be used.
# PASS_MIN_DAYS Minimum number of days allowed between password changes.
# PASS_WARN_AGE Number of days warning given before a password expires.
#
PASS_MAX_DAYS 99999
PASS_MIN_DAYS 0
PASS_WARN_AGE 7
#
# Min/max values for automatic uid selection in useradd
#
UID_MIN 1000
UID_MAX 60000
# System accounts
#SYS_UID_MIN 100
#SYS_UID_MAX 999
#
# Min/max values for automatic gid selection in groupadd
#
GID_MIN 1000
GID_MAX 60000
# System accounts
#SYS_GID_MIN 100
#SYS_GID_MAX 999
#
# Max number of login retries if password is bad. This will most likely be
# overriden by PAM, since the default pam_unix module has it's own built
# in of 3 retries. However, this is a safe fallback in case you are using
# an authentication module that does not enforce PAM_MAXTRIES.
#
LOGIN_RETRIES 5
#
# Max time in seconds for login
#
LOGIN_TIMEOUT 60
#
# Which fields may be changed by regular users using chfn - use
# any combination of letters "frwh" (full name, room number, work
# phone, home phone). If not defined, no changes are allowed.
# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
#
CHFN_RESTRICT rwh
#
# Should login be allowed if we can't cd to the home directory?
# Default in no.
#
DEFAULT_HOME yes
#
# If defined, this command is run when removing a user.
# It should remove any at/cron/print jobs etc. owned by
# the user to be removed (passed as the first argument).
#
#USERDEL_CMD /usr/sbin/userdel_local
#
# If set to yes, userdel will remove the user's group if it contains no
# more members, and useradd will create by default a group with the name
# of the user.
#
# Other former uses of this variable such as setting the umask when
# user==primary group are not used in PAM environments, such as Debian
#
USERGROUPS_ENAB yes
#
# Instead of the real user shell, the program specified by this parameter
# will be launched, although its visible name (argv[0]) will be the shell's.
# The program may do whatever it wants (logging, additional authentification,
# banner, ...) before running the actual shell.
#
# FAKE_SHELL /bin/fakeshell
#
# If defined, either full pathname of a file containing device names or
# a ":" delimited list of device names. Root logins will be allowed only
# upon these devices.
#
# This variable is used by login and su.
#
#CONSOLE /etc/consoles
#CONSOLE console:tty01:tty02:tty03:tty04
#
# List of groups to add to the user's supplementary group set
# when logging in on the console (as determined by the CONSOLE
# setting). Default is none.
#
# Use with caution - it is possible for users to gain permanent
# access to these groups, even when not logged in on the console.
# How to do it is left as an exercise for the reader...
#
# This variable is used by login and su.
#
#CONSOLE_GROUPS floppy:audio:cdrom
#
# If set to "yes", new passwords will be encrypted using the MD5-based
# algorithm compatible with the one used by recent releases of FreeBSD.
# It supports passwords of unlimited length and longer salt strings.
# Set to "no" if you need to copy encrypted passwords to other systems
# which don't understand the new algorithm. Default is "no".
#
# This variable is deprecated. You should use ENCRYPT_METHOD.
#
#MD5_CRYPT_ENAB no
#
# If set to MD5 , MD5-based algorithm will be used for encrypting password
# If set to SHA256, SHA256-based algorithm will be used for encrypting password
# If set to SHA512, SHA512-based algorithm will be used for encrypting password
# If set to DES, DES-based algorithm will be used for encrypting password (default)
# Overrides the MD5_CRYPT_ENAB option
#
# Note: It is recommended to use a value consistent with
# the PAM modules configuration.
#
ENCRYPT_METHOD SHA512
#
# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
#
# Define the number of SHA rounds.
# With a lot of rounds, it is more difficult to brute forcing the password.
# But note also that it more CPU resources will be needed to authenticate
# users.
#
# If not specified, the libc will choose the default number of rounds (5000).
# The values must be inside the 1000-999999999 range.
# If only one of the MIN or MAX values is set, then this value will be used.
# If MIN > MAX, the highest value will be used.
#
# SHA_CRYPT_MIN_ROUNDS 5000
# SHA_CRYPT_MAX_ROUNDS 5000
################# OBSOLETED BY PAM ##############
# #
# These options are now handled by PAM. Please #
# edit the appropriate file in /etc/pam.d/ to #
# enable the equivelants of them.
#
###############
#MOTD_FILE
#DIALUPS_CHECK_ENAB
#LASTLOG_ENAB
#MAIL_CHECK_ENAB
#OBSCURE_CHECKS_ENAB
#PORTTIME_CHECKS_ENAB
#SU_WHEEL_ONLY
#CRACKLIB_DICTPATH
#PASS_CHANGE_TRIES
#PASS_ALWAYS_WARN
#ENVIRON_FILE
#NOLOGINS_FILE
#ISSUE_FILE
#PASS_MIN_LEN
#PASS_MAX_LEN
#ULIMIT
#ENV_HZ
#CHFN_AUTH
#CHSH_AUTH
#FAIL_DELAY
################# OBSOLETED #######################
# #
# These options are no more handled by shadow. #
# #
# Shadow utilities will display a warning if they #
# still appear. #
# #
###################################################
# CLOSE_SESSIONS
# LOGIN_STRING
# NO_PASSWORD_CONSOLE
# QMAIL_DIR
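
ENCRYPT_METHOD SHA512 and the commented-out SHA_CRYPT_MIN/MAX_ROUNDS knobs above map onto the '$6$' crypt scheme with an optional explicit round count. A rough illustration, assuming a glibc-based Linux host and Python 3.7+ (the crypt module is deprecated in recent Python releases):

# Illustrative: what ENCRYPT_METHOD SHA512 and an explicit round count look like.
import crypt

salt = crypt.mksalt(crypt.METHOD_SHA512, rounds=5000)   # rounds kwarg: Python 3.7+
hashed = crypt.crypt('example-password', salt)
print(salt)                      # e.g. '$6$rounds=5000$<16 random salt chars>'
print(hashed.startswith('$6$'))  # True: '$6$' is the SHA-512 crypt identifier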

View File

@ -1,36 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-password - password-related modules common to all services
#
# This file is included from other service-specific PAM config files,
# and should contain a list of modules that define the services to be
# used to change user passwords. The default is pam_unix.
# Explanation of pam_unix options:
# The "yescrypt" option enables
#hashed passwords using the yescrypt algorithm, introduced in Debian
#11. Without this option, the default is Unix crypt. Prior releases
#used the option "sha512"; if a shadow password hash will be shared
#between Debian 11 and older releases replace "yescrypt" with "sha512"
#for compatibility . The "obscure" option replaces the old
#`OBSCURE_CHECKS_ENAB' option in login.defs. See the pam_unix manpage
#for other options.
# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
# To take advantage of this, it is recommended that you configure any
# local modules either before or after the default block, and use
# pam-auth-update to manage selection of other modules. See
# pam-auth-update(8) for details.
# here are the per-package modules (the "Primary" block)
password [success=1 default=ignore] pam_unix.so obscure yescrypt
# here's the fallback if no module succeeds
password requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
password required pam_permit.so
# and here are more per-package modules (the "Additional" block)
# end of pam-auth-update config

View File

@ -1,340 +0,0 @@
#
# /etc/login.defs - Configuration control definitions for the login package.
#
# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
# If unspecified, some arbitrary (and possibly incorrect) value will
# be assumed. All other items are optional - if not specified then
# the described action or option will be inhibited.
#
# Comment lines (lines beginning with "#") and blank lines are ignored.
#
# Modified for Linux. --marekm
# REQUIRED for useradd/userdel/usermod
# Directory where mailboxes reside, _or_ name of file, relative to the
# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
# MAIL_DIR takes precedence.
#
# Essentially:
# - MAIL_DIR defines the location of users mail spool files
# (for mbox use) by appending the username to MAIL_DIR as defined
# below.
# - MAIL_FILE defines the location of the users mail spool files as the
# fully-qualified filename obtained by prepending the user home
# directory before $MAIL_FILE
#
# NOTE: This is no more used for setting up users MAIL environment variable
# which is, starting from shadow 4.0.12-1 in Debian, entirely the
# job of the pam_mail PAM modules
# See default PAM configuration files provided for
# login, su, etc.
#
# This is a temporary situation: setting these variables will soon
# move to /etc/default/useradd and the variables will then be
# no more supported
MAIL_DIR /var/mail
#MAIL_FILE .mail
#
# Enable logging and display of /var/log/faillog login failure info.
# This option conflicts with the pam_tally PAM module.
#
FAILLOG_ENAB yes
#
# Enable display of unknown usernames when login failures are recorded.
#
# WARNING: Unknown usernames may become world readable.
# See #290803 and #298773 for details about how this could become a security
# concern
LOG_UNKFAIL_ENAB no
#
# Enable logging of successful logins
#
LOG_OK_LOGINS no
#
# Enable "syslog" logging of su activity - in addition to sulog file logging.
# SYSLOG_SG_ENAB does the same for newgrp and sg.
#
SYSLOG_SU_ENAB yes
SYSLOG_SG_ENAB yes
#
# If defined, all su activity is logged to this file.
#
#SULOG_FILE /var/log/sulog
#
# If defined, file which maps tty line to TERM environment parameter.
# Each line of the file is in a format something like "vt100 tty01".
#
#TTYTYPE_FILE /etc/ttytype
#
# If defined, login failures will be logged here in a utmp format
# last, when invoked as lastb, will read /var/log/btmp, so...
#
FTMP_FILE /var/log/btmp
#
# If defined, the command name to display when running "su -". For
# example, if this is defined as "su" then a "ps" will display the
# command is "-su". If not defined, then "ps" would display the
# name of the shell actually being run, e.g. something like "-sh".
#
SU_NAME su
#
# If defined, file which inhibits all the usual chatter during the login
# sequence. If a full pathname, then hushed mode will be enabled if the
# user's name or shell are found in the file. If not a full pathname, then
# hushed mode will be enabled if the file exists in the user's home directory.
#
HUSHLOGIN_FILE .hushlogin
#HUSHLOGIN_FILE /etc/hushlogins
#
# *REQUIRED* The default PATH settings, for superuser and normal users.
#
# (they are minimal, add the rest in the shell startup files)
ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games
#
# Terminal permissions
#
# TTYGROUP Login tty will be assigned this group ownership.
# TTYPERM Login tty will be set to this permission.
#
# If you have a "write" program which is "setgid" to a special group
# which owns the terminals, define TTYGROUP to the group number and
# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
# TTYPERM to either 622 or 600.
#
# In Debian /usr/bin/bsd-write or similar programs are setgid tty
# However, the default and recommended value for TTYPERM is still 0600
# to not allow anyone to write to anyone else console or terminal
# Users can still allow other people to write them by issuing
# the "mesg y" command.
TTYGROUP tty
TTYPERM 0600
#
# Login configuration initializations:
#
# ERASECHAR Terminal ERASE character ('\010' = backspace).
# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
# UMASK Default "umask" value.
#
# The ERASECHAR and KILLCHAR are used only on System V machines.
#
# UMASK is the default umask value for pam_umask and is used by
# useradd and newusers to set the mode of the new home directories.
# 022 is the "historical" value in Debian for UMASK
# 027, or even 077, could be considered better for privacy
# There is no One True Answer here : each sysadmin must make up his/her
# mind.
#
# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
# for private user groups, i. e. the uid is the same as gid, and username is
# the same as the primary group name: for these, the user permissions will be
# used as group permissions, e. g. 022 will become 002.
#
# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
#
ERASECHAR 0177
KILLCHAR 025
UMASK 022
#
# Password aging controls:
#
# PASS_MAX_DAYS Maximum number of days a password may be used.
# PASS_MIN_DAYS Minimum number of days allowed between password changes.
# PASS_WARN_AGE Number of days warning given before a password expires.
#
PASS_MAX_DAYS 99999
PASS_MIN_DAYS 0
PASS_WARN_AGE 7
#
# Min/max values for automatic uid selection in useradd
#
UID_MIN 1000
UID_MAX 60000
# System accounts
#SYS_UID_MIN 100
#SYS_UID_MAX 999
#
# Min/max values for automatic gid selection in groupadd
#
GID_MIN 1000
GID_MAX 60000
# System accounts
#SYS_GID_MIN 100
#SYS_GID_MAX 999
#
# Max number of login retries if password is bad. This will most likely be
# overriden by PAM, since the default pam_unix module has it's own built
# in of 3 retries. However, this is a safe fallback in case you are using
# an authentication module that does not enforce PAM_MAXTRIES.
#
LOGIN_RETRIES 5
#
# Max time in seconds for login
#
LOGIN_TIMEOUT 60
#
# Which fields may be changed by regular users using chfn - use
# any combination of letters "frwh" (full name, room number, work
# phone, home phone). If not defined, no changes are allowed.
# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
#
CHFN_RESTRICT rwh
#
# Should login be allowed if we can't cd to the home directory?
# Default in no.
#
DEFAULT_HOME yes
#
# If defined, this command is run when removing a user.
# It should remove any at/cron/print jobs etc. owned by
# the user to be removed (passed as the first argument).
#
#USERDEL_CMD /usr/sbin/userdel_local
#
# If set to yes, userdel will remove the user's group if it contains no
# more members, and useradd will create by default a group with the name
# of the user.
#
# Other former uses of this variable such as setting the umask when
# user==primary group are not used in PAM environments, such as Debian
#
USERGROUPS_ENAB yes
#
# Instead of the real user shell, the program specified by this parameter
# will be launched, although its visible name (argv[0]) will be the shell's.
# The program may do whatever it wants (logging, additional authentification,
# banner, ...) before running the actual shell.
#
# FAKE_SHELL /bin/fakeshell
#
# If defined, either full pathname of a file containing device names or
# a ":" delimited list of device names. Root logins will be allowed only
# upon these devices.
#
# This variable is used by login and su.
#
#CONSOLE /etc/consoles
#CONSOLE console:tty01:tty02:tty03:tty04
#
# List of groups to add to the user's supplementary group set
# when logging in on the console (as determined by the CONSOLE
# setting). Default is none.
#
# Use with caution - it is possible for users to gain permanent
# access to these groups, even when not logged in on the console.
# How to do it is left as an exercise for the reader...
#
# This variable is used by login and su.
#
#CONSOLE_GROUPS floppy:audio:cdrom
#
# If set to "yes", new passwords will be encrypted using the MD5-based
# algorithm compatible with the one used by recent releases of FreeBSD.
# It supports passwords of unlimited length and longer salt strings.
# Set to "no" if you need to copy encrypted passwords to other systems
# which don't understand the new algorithm. Default is "no".
#
# This variable is deprecated. You should use ENCRYPT_METHOD.
#
#MD5_CRYPT_ENAB no
#
# If set to MD5 , MD5-based algorithm will be used for encrypting password
# If set to SHA256, SHA256-based algorithm will be used for encrypting password
# If set to SHA512, SHA512-based algorithm will be used for encrypting password
# If set to DES, DES-based algorithm will be used for encrypting password (default)
# Overrides the MD5_CRYPT_ENAB option
#
# Note: It is recommended to use a value consistent with
# the PAM modules configuration.
#
ENCRYPT_METHOD SHA512
#
# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
#
# Define the number of SHA rounds.
# With a lot of rounds, it is more difficult to brute forcing the password.
# But note also that it more CPU resources will be needed to authenticate
# users.
#
# If not specified, the libc will choose the default number of rounds (5000).
# The values must be inside the 1000-999999999 range.
# If only one of the MIN or MAX values is set, then this value will be used.
# If MIN > MAX, the highest value will be used.
#
# SHA_CRYPT_MIN_ROUNDS 5000
# SHA_CRYPT_MAX_ROUNDS 5000
################# OBSOLETED BY PAM ##############
# #
# These options are now handled by PAM. Please #
# edit the appropriate file in /etc/pam.d/ to #
# enable the equivelants of them.
#
###############
#MOTD_FILE
#DIALUPS_CHECK_ENAB
#LASTLOG_ENAB
#MAIL_CHECK_ENAB
#OBSCURE_CHECKS_ENAB
#PORTTIME_CHECKS_ENAB
#SU_WHEEL_ONLY
#CRACKLIB_DICTPATH
#PASS_CHANGE_TRIES
#PASS_ALWAYS_WARN
#ENVIRON_FILE
#NOLOGINS_FILE
#ISSUE_FILE
#PASS_MIN_LEN
#PASS_MAX_LEN
#ULIMIT
#ENV_HZ
#CHFN_AUTH
#CHSH_AUTH
#FAIL_DELAY
################# OBSOLETED #######################
# #
# These options are no more handled by shadow. #
# #
# Shadow utilities will display a warning if they #
# still appear. #
# #
###################################################
# CLOSE_SESSIONS
# LOGIN_STRING
# NO_PASSWORD_CONSOLE
# QMAIL_DIR

View File

@ -1,39 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-password - password-related modules common to all services
#
# This file is included from other service-specific PAM config files,
# and should contain a list of modules that define the services to be
# used to change user passwords. The default is pam_unix.
# Explanation of pam_unix options:
# The "yescrypt" option enables
#hashed passwords using the yescrypt algorithm, introduced in Debian
#11. Without this option, the default is Unix crypt. Prior releases
#used the option "sha512"; if a shadow password hash will be shared
#between Debian 11 and older releases replace "yescrypt" with "sha512"
#for compatibility . The "obscure" option replaces the old
#`OBSCURE_CHECKS_ENAB' option in login.defs. See the pam_unix manpage
#for other options.
# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
# To take advantage of this, it is recommended that you configure any
# local modules either before or after the default block, and use
# pam-auth-update to manage selection of other modules. See
# pam-auth-update(8) for details.
# here are the per-package modules (the "Primary" block)
password requisite pam_cracklib.so retry=3 maxrepeat=0 minlen=8 ucredit=0 lcredit=0 dcredit=-1 ocredit=0 enforce_for_root
password required pam_pwhistory.so remember=0 use_authtok enforce_for_root
password [success=1 default=ignore] pam_unix.so obscure yescrypt
# here's the fallback if no module succeeds
password requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
password required pam_permit.so
# and here are more per-package modules (the "Additional" block)
# end of pam-auth-update config
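
The pam_cracklib line above encodes the password policy numerically: minlen=8 with all class credits at 0 except dcredit=-1, i.e. at least eight characters including at least one digit. The check below is a paraphrase of those option semantics, not the module's actual algorithm:

# Illustrative check mirroring the pam_cracklib options above (minlen=8,
# dcredit=-1, ucredit=lcredit=ocredit=0): length >= 8 and at least one digit,
# with no length credit granted for other character classes.
import re

def meets_policy(password):
    return len(password) >= 8 and re.search(r'\d', password) is not None

assert meets_policy('Sonic123') is True
assert meets_policy('short1') is False        # fewer than 8 characters
assert meets_policy('NoDigitsHere') is False  # dcredit=-1 requires a digit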

View File

@ -1,340 +0,0 @@
#
# /etc/login.defs - Configuration control definitions for the login package.
#
# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
# If unspecified, some arbitrary (and possibly incorrect) value will
# be assumed. All other items are optional - if not specified then
# the described action or option will be inhibited.
#
# Comment lines (lines beginning with "#") and blank lines are ignored.
#
# Modified for Linux. --marekm
# REQUIRED for useradd/userdel/usermod
# Directory where mailboxes reside, _or_ name of file, relative to the
# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
# MAIL_DIR takes precedence.
#
# Essentially:
# - MAIL_DIR defines the location of users mail spool files
# (for mbox use) by appending the username to MAIL_DIR as defined
# below.
# - MAIL_FILE defines the location of the users mail spool files as the
# fully-qualified filename obtained by prepending the user home
# directory before $MAIL_FILE
#
# NOTE: This is no more used for setting up users MAIL environment variable
# which is, starting from shadow 4.0.12-1 in Debian, entirely the
# job of the pam_mail PAM modules
# See default PAM configuration files provided for
# login, su, etc.
#
# This is a temporary situation: setting these variables will soon
# move to /etc/default/useradd and the variables will then be
# no more supported
MAIL_DIR /var/mail
#MAIL_FILE .mail
#
# Enable logging and display of /var/log/faillog login failure info.
# This option conflicts with the pam_tally PAM module.
#
FAILLOG_ENAB yes
#
# Enable display of unknown usernames when login failures are recorded.
#
# WARNING: Unknown usernames may become world readable.
# See #290803 and #298773 for details about how this could become a security
# concern
LOG_UNKFAIL_ENAB no
#
# Enable logging of successful logins
#
LOG_OK_LOGINS no
#
# Enable "syslog" logging of su activity - in addition to sulog file logging.
# SYSLOG_SG_ENAB does the same for newgrp and sg.
#
SYSLOG_SU_ENAB yes
SYSLOG_SG_ENAB yes
#
# If defined, all su activity is logged to this file.
#
#SULOG_FILE /var/log/sulog
#
# If defined, file which maps tty line to TERM environment parameter.
# Each line of the file is in a format something like "vt100 tty01".
#
#TTYTYPE_FILE /etc/ttytype
#
# If defined, login failures will be logged here in a utmp format
# last, when invoked as lastb, will read /var/log/btmp, so...
#
FTMP_FILE /var/log/btmp
#
# If defined, the command name to display when running "su -". For
# example, if this is defined as "su" then a "ps" will display the
# command is "-su". If not defined, then "ps" would display the
# name of the shell actually being run, e.g. something like "-sh".
#
SU_NAME su
#
# If defined, file which inhibits all the usual chatter during the login
# sequence. If a full pathname, then hushed mode will be enabled if the
# user's name or shell are found in the file. If not a full pathname, then
# hushed mode will be enabled if the file exists in the user's home directory.
#
HUSHLOGIN_FILE .hushlogin
#HUSHLOGIN_FILE /etc/hushlogins
#
# *REQUIRED* The default PATH settings, for superuser and normal users.
#
# (they are minimal, add the rest in the shell startup files)
ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games
#
# Terminal permissions
#
# TTYGROUP Login tty will be assigned this group ownership.
# TTYPERM Login tty will be set to this permission.
#
# If you have a "write" program which is "setgid" to a special group
# which owns the terminals, define TTYGROUP to the group number and
# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
# TTYPERM to either 622 or 600.
#
# In Debian /usr/bin/bsd-write or similar programs are setgid tty
# However, the default and recommended value for TTYPERM is still 0600
# to not allow anyone to write to anyone else console or terminal
# Users can still allow other people to write them by issuing
# the "mesg y" command.
TTYGROUP tty
TTYPERM 0600
#
# Login configuration initializations:
#
# ERASECHAR Terminal ERASE character ('\010' = backspace).
# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
# UMASK Default "umask" value.
#
# The ERASECHAR and KILLCHAR are used only on System V machines.
#
# UMASK is the default umask value for pam_umask and is used by
# useradd and newusers to set the mode of the new home directories.
# 022 is the "historical" value in Debian for UMASK
# 027, or even 077, could be considered better for privacy
# There is no One True Answer here : each sysadmin must make up his/her
# mind.
#
# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
# for private user groups, i. e. the uid is the same as gid, and username is
# the same as the primary group name: for these, the user permissions will be
# used as group permissions, e. g. 022 will become 002.
#
# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
#
ERASECHAR 0177
KILLCHAR 025
UMASK 022
#
# Password aging controls:
#
# PASS_MAX_DAYS Maximum number of days a password may be used.
# PASS_MIN_DAYS Minimum number of days allowed between password changes.
# PASS_WARN_AGE Number of days warning given before a password expires.
#
PASS_MAX_DAYS 99999
PASS_MIN_DAYS 0
PASS_WARN_AGE 7
#
# Min/max values for automatic uid selection in useradd
#
UID_MIN 1000
UID_MAX 60000
# System accounts
#SYS_UID_MIN 100
#SYS_UID_MAX 999
#
# Min/max values for automatic gid selection in groupadd
#
GID_MIN 1000
GID_MAX 60000
# System accounts
#SYS_GID_MIN 100
#SYS_GID_MAX 999
#
# Max number of login retries if password is bad. This will most likely be
# overriden by PAM, since the default pam_unix module has it's own built
# in of 3 retries. However, this is a safe fallback in case you are using
# an authentication module that does not enforce PAM_MAXTRIES.
#
LOGIN_RETRIES 5
#
# Max time in seconds for login
#
LOGIN_TIMEOUT 60
#
# Which fields may be changed by regular users using chfn - use
# any combination of letters "frwh" (full name, room number, work
# phone, home phone). If not defined, no changes are allowed.
# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
#
CHFN_RESTRICT rwh
#
# Should login be allowed if we can't cd to the home directory?
# Default in no.
#
DEFAULT_HOME yes
#
# If defined, this command is run when removing a user.
# It should remove any at/cron/print jobs etc. owned by
# the user to be removed (passed as the first argument).
#
#USERDEL_CMD /usr/sbin/userdel_local
#
# If set to yes, userdel will remove the user's group if it contains no
# more members, and useradd will create by default a group with the name
# of the user.
#
# Other former uses of this variable such as setting the umask when
# user==primary group are not used in PAM environments, such as Debian
#
USERGROUPS_ENAB yes
#
# Instead of the real user shell, the program specified by this parameter
# will be launched, although its visible name (argv[0]) will be the shell's.
# The program may do whatever it wants (logging, additional authentification,
# banner, ...) before running the actual shell.
#
# FAKE_SHELL /bin/fakeshell
#
# If defined, either full pathname of a file containing device names or
# a ":" delimited list of device names. Root logins will be allowed only
# upon these devices.
#
# This variable is used by login and su.
#
#CONSOLE /etc/consoles
#CONSOLE console:tty01:tty02:tty03:tty04
#
# List of groups to add to the user's supplementary group set
# when logging in on the console (as determined by the CONSOLE
# setting). Default is none.
#
# Use with caution - it is possible for users to gain permanent
# access to these groups, even when not logged in on the console.
# How to do it is left as an exercise for the reader...
#
# This variable is used by login and su.
#
#CONSOLE_GROUPS floppy:audio:cdrom
#
# If set to "yes", new passwords will be encrypted using the MD5-based
# algorithm compatible with the one used by recent releases of FreeBSD.
# It supports passwords of unlimited length and longer salt strings.
# Set to "no" if you need to copy encrypted passwords to other systems
# which don't understand the new algorithm. Default is "no".
#
# This variable is deprecated. You should use ENCRYPT_METHOD.
#
#MD5_CRYPT_ENAB no
#
# If set to MD5 , MD5-based algorithm will be used for encrypting password
# If set to SHA256, SHA256-based algorithm will be used for encrypting password
# If set to SHA512, SHA512-based algorithm will be used for encrypting password
# If set to DES, DES-based algorithm will be used for encrypting password (default)
# Overrides the MD5_CRYPT_ENAB option
#
# Note: It is recommended to use a value consistent with
# the PAM modules configuration.
#
ENCRYPT_METHOD SHA512
#
# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
#
# Define the number of SHA rounds.
# With a lot of rounds, it is more difficult to brute forcing the password.
# But note also that it more CPU resources will be needed to authenticate
# users.
#
# If not specified, the libc will choose the default number of rounds (5000).
# The values must be inside the 1000-999999999 range.
# If only one of the MIN or MAX values is set, then this value will be used.
# If MIN > MAX, the highest value will be used.
#
# SHA_CRYPT_MIN_ROUNDS 5000
# SHA_CRYPT_MAX_ROUNDS 5000
################# OBSOLETED BY PAM ##############
# #
# These options are now handled by PAM. Please #
# edit the appropriate file in /etc/pam.d/ to #
# enable the equivelants of them.
#
###############
#MOTD_FILE
#DIALUPS_CHECK_ENAB
#LASTLOG_ENAB
#MAIL_CHECK_ENAB
#OBSCURE_CHECKS_ENAB
#PORTTIME_CHECKS_ENAB
#SU_WHEEL_ONLY
#CRACKLIB_DICTPATH
#PASS_CHANGE_TRIES
#PASS_ALWAYS_WARN
#ENVIRON_FILE
#NOLOGINS_FILE
#ISSUE_FILE
#PASS_MIN_LEN
#PASS_MAX_LEN
#ULIMIT
#ENV_HZ
#CHFN_AUTH
#CHSH_AUTH
#FAIL_DELAY
################# OBSOLETED #######################
# #
# These options are no more handled by shadow. #
# #
# Shadow utilities will display a warning if they #
# still appear. #
# #
###################################################
# CLOSE_SESSIONS
# LOGIN_STRING
# NO_PASSWORD_CONSOLE
# QMAIL_DIR

View File

@ -1,39 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-password - password-related modules common to all services
#
# This file is included from other service-specific PAM config files,
# and should contain a list of modules that define the services to be
# used to change user passwords. The default is pam_unix.
# Explanation of pam_unix options:
# The "yescrypt" option enables
#hashed passwords using the yescrypt algorithm, introduced in Debian
#11. Without this option, the default is Unix crypt. Prior releases
#used the option "sha512"; if a shadow password hash will be shared
#between Debian 11 and older releases replace "yescrypt" with "sha512"
#for compatibility . The "obscure" option replaces the old
#`OBSCURE_CHECKS_ENAB' option in login.defs. See the pam_unix manpage
#for other options.
# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
# To take advantage of this, it is recommended that you configure any
# local modules either before or after the default block, and use
# pam-auth-update to manage selection of other modules. See
# pam-auth-update(8) for details.
# here are the per-package modules (the "Primary" block)
password requisite pam_cracklib.so retry=3 maxrepeat=0 minlen=8 ucredit=-1 lcredit=-1 dcredit=-1 ocredit=-1 reject_username enforce_for_root
password required pam_pwhistory.so remember=10 use_authtok enforce_for_root
password [success=1 default=ignore] pam_unix.so obscure yescrypt
# here's the fallback if no module succeeds
password requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
password required pam_permit.so
# and here are more per-package modules (the "Additional" block)
# end of pam-auth-update config

View File

@ -1,340 +0,0 @@
#
# /etc/login.defs - Configuration control definitions for the login package.
#
# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
# If unspecified, some arbitrary (and possibly incorrect) value will
# be assumed. All other items are optional - if not specified then
# the described action or option will be inhibited.
#
# Comment lines (lines beginning with "#") and blank lines are ignored.
#
# Modified for Linux. --marekm
# REQUIRED for useradd/userdel/usermod
# Directory where mailboxes reside, _or_ name of file, relative to the
# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
# MAIL_DIR takes precedence.
#
# Essentially:
# - MAIL_DIR defines the location of users mail spool files
# (for mbox use) by appending the username to MAIL_DIR as defined
# below.
# - MAIL_FILE defines the location of the users mail spool files as the
# fully-qualified filename obtained by prepending the user home
# directory before $MAIL_FILE
#
# NOTE: This is no more used for setting up users MAIL environment variable
# which is, starting from shadow 4.0.12-1 in Debian, entirely the
# job of the pam_mail PAM modules
# See default PAM configuration files provided for
# login, su, etc.
#
# This is a temporary situation: setting these variables will soon
# move to /etc/default/useradd and the variables will then be
# no more supported
MAIL_DIR /var/mail
#MAIL_FILE .mail
#
# Enable logging and display of /var/log/faillog login failure info.
# This option conflicts with the pam_tally PAM module.
#
FAILLOG_ENAB yes
#
# Enable display of unknown usernames when login failures are recorded.
#
# WARNING: Unknown usernames may become world readable.
# See #290803 and #298773 for details about how this could become a security
# concern
LOG_UNKFAIL_ENAB no
#
# Enable logging of successful logins
#
LOG_OK_LOGINS no
#
# Enable "syslog" logging of su activity - in addition to sulog file logging.
# SYSLOG_SG_ENAB does the same for newgrp and sg.
#
SYSLOG_SU_ENAB yes
SYSLOG_SG_ENAB yes
#
# If defined, all su activity is logged to this file.
#
#SULOG_FILE /var/log/sulog
#
# If defined, file which maps tty line to TERM environment parameter.
# Each line of the file is in a format something like "vt100 tty01".
#
#TTYTYPE_FILE /etc/ttytype
#
# If defined, login failures will be logged here in a utmp format
# last, when invoked as lastb, will read /var/log/btmp, so...
#
FTMP_FILE /var/log/btmp
#
# If defined, the command name to display when running "su -". For
# example, if this is defined as "su" then a "ps" will display the
# command as "-su". If not defined, then "ps" would display the
# name of the shell actually being run, e.g. something like "-sh".
#
SU_NAME su
#
# If defined, file which inhibits all the usual chatter during the login
# sequence. If a full pathname, then hushed mode will be enabled if the
# user's name or shell is found in the file. If not a full pathname, then
# hushed mode will be enabled if the file exists in the user's home directory.
#
HUSHLOGIN_FILE .hushlogin
#HUSHLOGIN_FILE /etc/hushlogins
#
# *REQUIRED* The default PATH settings, for superuser and normal users.
#
# (they are minimal, add the rest in the shell startup files)
ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games
#
# Terminal permissions
#
# TTYGROUP Login tty will be assigned this group ownership.
# TTYPERM Login tty will be set to this permission.
#
# If you have a "write" program which is "setgid" to a special group
# which owns the terminals, define TTYGROUP to the group number and
# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
# TTYPERM to either 622 or 600.
#
# In Debian /usr/bin/bsd-write or similar programs are setgid tty
# However, the default and recommended value for TTYPERM is still 0600
# to not allow anyone to write to anyone else's console or terminal.
# Users can still allow other people to write to them by issuing
# the "mesg y" command.
TTYGROUP tty
TTYPERM 0600
#
# Login configuration initializations:
#
# ERASECHAR Terminal ERASE character ('\010' = backspace).
# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
# UMASK Default "umask" value.
#
# The ERASECHAR and KILLCHAR are used only on System V machines.
#
# UMASK is the default umask value for pam_umask and is used by
# useradd and newusers to set the mode of the new home directories.
# 022 is the "historical" value in Debian for UMASK
# 027, or even 077, could be considered better for privacy
# There is no One True Answer here: each sysadmin must make up his/her
# mind.
#
# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
# for private user groups, i.e. the uid is the same as gid, and username is
# the same as the primary group name: for these, the user permissions will be
# used as group permissions, e.g. 022 will become 002.
#
# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
#
ERASECHAR 0177
KILLCHAR 025
UMASK 022
#
# Password aging controls:
#
# PASS_MAX_DAYS Maximum number of days a password may be used.
# PASS_MIN_DAYS Minimum number of days allowed between password changes.
# PASS_WARN_AGE Number of days warning given before a password expires.
#
PASS_MAX_DAYS 180
PASS_MIN_DAYS 0
PASS_WARN_AGE 15
#
# Min/max values for automatic uid selection in useradd
#
UID_MIN 1000
UID_MAX 60000
# System accounts
#SYS_UID_MIN 100
#SYS_UID_MAX 999
#
# Min/max values for automatic gid selection in groupadd
#
GID_MIN 1000
GID_MAX 60000
# System accounts
#SYS_GID_MIN 100
#SYS_GID_MAX 999
#
# Max number of login retries if password is bad. This will most likely be
# overridden by PAM, since the default pam_unix module has its own built-in
# limit of 3 retries. However, this is a safe fallback in case you are using
# an authentication module that does not enforce PAM_MAXTRIES.
#
LOGIN_RETRIES 5
#
# Max time in seconds for login
#
LOGIN_TIMEOUT 60
#
# Which fields may be changed by regular users using chfn - use
# any combination of letters "frwh" (full name, room number, work
# phone, home phone). If not defined, no changes are allowed.
# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
#
CHFN_RESTRICT rwh
#
# Should login be allowed if we can't cd to the home directory?
# Default is no.
#
DEFAULT_HOME yes
#
# If defined, this command is run when removing a user.
# It should remove any at/cron/print jobs etc. owned by
# the user to be removed (passed as the first argument).
#
#USERDEL_CMD /usr/sbin/userdel_local
#
# If set to yes, userdel will remove the user's group if it contains no
# more members, and useradd will create by default a group with the name
# of the user.
#
# Other former uses of this variable such as setting the umask when
# user==primary group are not used in PAM environments, such as Debian
#
USERGROUPS_ENAB yes
#
# Instead of the real user shell, the program specified by this parameter
# will be launched, although its visible name (argv[0]) will be the shell's.
# The program may do whatever it wants (logging, additional authentication,
# banner, ...) before running the actual shell.
#
# FAKE_SHELL /bin/fakeshell
#
# If defined, either full pathname of a file containing device names or
# a ":" delimited list of device names. Root logins will be allowed only
# upon these devices.
#
# This variable is used by login and su.
#
#CONSOLE /etc/consoles
#CONSOLE console:tty01:tty02:tty03:tty04
#
# List of groups to add to the user's supplementary group set
# when logging in on the console (as determined by the CONSOLE
# setting). Default is none.
#
# Use with caution - it is possible for users to gain permanent
# access to these groups, even when not logged in on the console.
# How to do it is left as an exercise for the reader...
#
# This variable is used by login and su.
#
#CONSOLE_GROUPS floppy:audio:cdrom
#
# If set to "yes", new passwords will be encrypted using the MD5-based
# algorithm compatible with the one used by recent releases of FreeBSD.
# It supports passwords of unlimited length and longer salt strings.
# Set to "no" if you need to copy encrypted passwords to other systems
# which don't understand the new algorithm. Default is "no".
#
# This variable is deprecated. You should use ENCRYPT_METHOD.
#
#MD5_CRYPT_ENAB no
#
# If set to MD5, MD5-based algorithm will be used for encrypting password
# If set to SHA256, SHA256-based algorithm will be used for encrypting password
# If set to SHA512, SHA512-based algorithm will be used for encrypting password
# If set to DES, DES-based algorithm will be used for encrypting password (default)
# Overrides the MD5_CRYPT_ENAB option
#
# Note: It is recommended to use a value consistent with
# the PAM modules configuration.
#
ENCRYPT_METHOD SHA512
#
# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
#
# Define the number of SHA rounds.
# With a lot of rounds, it is more difficult to brute-force the password.
# But note also that more CPU resources will be needed to authenticate
# users.
#
# If not specified, the libc will choose the default number of rounds (5000).
# The values must be inside the 1000-999999999 range.
# If only one of the MIN or MAX values is set, then this value will be used.
# If MIN > MAX, the highest value will be used.
#
# SHA_CRYPT_MIN_ROUNDS 5000
# SHA_CRYPT_MAX_ROUNDS 5000
################# OBSOLETED BY PAM ##############
# #
# These options are now handled by PAM. Please #
# edit the appropriate file in /etc/pam.d/ to #
# enable the equivalents of them. #
# #
###################################################
#MOTD_FILE
#DIALUPS_CHECK_ENAB
#LASTLOG_ENAB
#MAIL_CHECK_ENAB
#OBSCURE_CHECKS_ENAB
#PORTTIME_CHECKS_ENAB
#SU_WHEEL_ONLY
#CRACKLIB_DICTPATH
#PASS_CHANGE_TRIES
#PASS_ALWAYS_WARN
#ENVIRON_FILE
#NOLOGINS_FILE
#ISSUE_FILE
#PASS_MIN_LEN
#PASS_MAX_LEN
#ULIMIT
#ENV_HZ
#CHFN_AUTH
#CHSH_AUTH
#FAIL_DELAY
################# OBSOLETED #######################
# #
# These options are no longer handled by shadow. #
# #
# Shadow utilities will display a warning if they #
# still appear. #
# #
###################################################
# CLOSE_SESSIONS
# LOGIN_STRING
# NO_PASSWORD_CONSOLE
# QMAIL_DIR
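For illustration, here is a minimal, hypothetical Python sketch of how the password-aging values defined above (PASS_MAX_DAYS, PASS_MIN_DAYS, PASS_WARN_AGE) could be read from a login.defs-style file; the helper name and path are assumptions for the example, not part of the repository:

# Hypothetical helper: read selected keys from a login.defs-style file.
# Comment lines and blank lines are ignored, matching the format described above.
def read_login_defs(path="/etc/login.defs",
                    keys=("PASS_MAX_DAYS", "PASS_MIN_DAYS", "PASS_WARN_AGE")):
    values = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            parts = line.split(None, 1)
            if len(parts) == 2 and parts[0] in keys:
                values[parts[0]] = parts[1].strip()
    return values

# With the file above this would yield:
# {'PASS_MAX_DAYS': '180', 'PASS_MIN_DAYS': '0', 'PASS_WARN_AGE': '15'}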

View File

@ -1,2 +0,0 @@
# server[:port] shared_secret timeout(s) source_ip vrf
[10.10.10.1]:1645 pass1 1

View File

@ -1,2 +0,0 @@
# server[:port] shared_secret timeout(s) source_ip vrf
[10.10.10.2]:1645 pass2 2
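The two per-server files above share one whitespace-separated format: server[:port], shared secret, timeout, and optional source_ip and vrf columns. A minimal sketch of splitting one such line (the helper name is made up for the example):

# Illustration only: split a "[host]:port secret timeout [source_ip] [vrf]" line
# as used by the pam_radius_auth per-server files shown above.
def parse_radius_server_line(line):
    fields = line.split()
    server, secret, timeout = fields[0], fields[1], int(fields[2])
    source_ip = fields[3] if len(fields) > 3 else None
    vrf = fields[4] if len(fields) > 4 else None
    host, _, port = server.rpartition(":")
    return {"host": host.strip("[]"), "port": int(port),
            "secret": secret, "timeout": timeout,
            "source_ip": source_ip, "vrf": vrf}

# parse_radius_server_line("[10.10.10.1]:1645 pass1 1")
# -> {'host': '10.10.10.1', 'port': 1645, 'secret': 'pass1', 'timeout': 1,
#     'source_ip': None, 'vrf': None}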

View File

@ -1,30 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-auth - authentication settings common to all services
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# here are the per-package modules (the "Primary" block)
# root user can only be authenticated locally. Jump to local.
auth [success=2 default=ignore] pam_succeed_if.so user = root
# For the RADIUS servers, on success jump to caching the MPL (Privilege)
auth [success=3 new_authtok_reqd=done default=ignore auth_err=die] pam_radius_auth.so conf=/etc/pam_radius_auth.d/10.10.10.1_1645.conf privilege_level protocol=pap retry=1 nas_ip_address=10.10.10.10 debug try_first_pass
auth [success=2 new_authtok_reqd=done default=ignore auth_err=die] pam_radius_auth.so conf=/etc/pam_radius_auth.d/10.10.10.2_1645.conf privilege_level protocol=chap retry=2 nas_ip_address=10.10.10.10 debug try_first_pass
# Local
auth [success=done new_authtok_reqd=done default=ignore auth_err=die maxtries=die] pam_unix.so nullok try_first_pass
auth requisite pam_deny.so
# Cache MPL(Privilege)
auth [success=1 default=ignore] pam_exec.so /usr/sbin/cache_radius
#
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
auth required pam_permit.so
# and here are more per-package modules (the "Additional" block)
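To make the jump targets in this stack easier to follow, here is an illustration-only Python walk-through (not a PAM implementation) of the standard Linux-PAM rule that "success=N" skips the next N modules; the module ordering mirrors the file above:

# Illustration only: show which module each "success=N" jump above lands on.
STACK = [
    ("pam_succeed_if (root only)", 2),     # success=2 -> skip the two RADIUS modules
    ("pam_radius_auth (10.10.10.1)", 3),   # success=3 -> skip ahead to cache_radius
    ("pam_radius_auth (10.10.10.2)", 2),   # success=2 -> skip ahead to cache_radius
    ("pam_unix (local)", "done"),          # success=done -> authentication complete
    ("pam_deny (fallback)", None),
    ("pam_exec cache_radius", 1),          # success=1 -> skip the second pam_deny
    ("pam_deny (fallback)", None),
    ("pam_permit", None),
]

def landing_module(index):
    """Module reached when the module at STACK[index] returns success."""
    jump = STACK[index][1]
    if jump == "done":
        return "authentication complete"
    if jump is None:
        return "no skip; the control value decides"
    return STACK[index + 1 + jump][0]

for i, (name, _) in enumerate(STACK):
    print(f"{name:32s} -> {landing_module(i)}")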

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth-sonic
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,56 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2
# RADIUS NSS Configuration File
#
# Debug: on|off|trace
# Default: off
#
# debug=on
debug=on
#
# User Privilege:
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# Eg:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell
#
# many_to_one:
# y: Map RADIUS users to one local user per privilege.
# n: Create local user account on first successful authentication.
# Default: n
#
# Eg:
# many_to_one=y
#
# unconfirmed_disallow:
# y: Do not allow unconfirmed users (users created before authentication)
# n: Allow unconfirmed users.
# Default: n
# Eg:
# unconfirmed_disallow=y
#
# unconfirmed_ageout:
# <seconds>: Wait time before purging unconfirmed users
# Default: 600
#
# Eg:
# unconfirmed_ageout=900
#
# unconfirmed_regexp:
# <regexp>: The RE to match the command line of processes for which the
# creation of unconfirmed users is to be allowed.
# Default: (.*: <user> \[priv\])|(.*: \[accepted\])
# where: <user> is the unconfirmed user.
#
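The default unconfirmed_regexp shown above is an ordinary regular expression matched against process command lines, with <user> standing in for the unconfirmed user. A minimal Python check of that default (the user name here is a placeholder):

# Illustration only: the default unconfirmed_regexp from radius_nss.conf above,
# with "<user>" substituted by a concrete (placeholder) unconfirmed user.
import re

user = "remote_user"  # placeholder
pattern = r"(.*: {} \[priv\])|(.*: \[accepted\])".format(re.escape(user))

print(bool(re.match(pattern, "sshd: remote_user [priv]")))   # True
print(bool(re.match(pattern, "sshd: [accepted]")))           # True
print(bool(re.match(pattern, "bash")))                       # False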

View File

@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth-sonic
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password
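If you want to exercise whatever auth stack /etc/pam.d/sshd ends up including (common-auth-sonic in the file above), one option is the third-party python-pam package; this is a hedged sketch assuming that package is installed (pip install python-pam) and the credentials are placeholders:

# Illustration only: authenticate against the "sshd" PAM service using
# the python-pam package; .code and .reason carry the PAM result details.
import pam

p = pam.pam()
ok = p.authenticate("testuser", "testpassword", service="sshd")
print("auth result:", ok, "| code:", p.code, "| reason:", p.reason)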

View File

@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password

View File

@ -1,40 +0,0 @@
# Configuration for libnss-tacplus
# debug - If you want to enable the debug log, set it to on
# Default: off
# debug=on
debug=on
# local_accounting - If you want local accounting, set it
# Default: None
# local_accounting
# tacacs_accounting - If you want tacacs+ accounting, set it
# Default: None
# tacacs_accounting
# local_authorization - If you want local authorization, set it
# Default: None
# local_authorization
local_authorization
# tacacs_authorization - If you want tacacs+ authorization, set it
# Default: None
# tacacs_authorization
# src_ip - set source address of TACACS+ protocol packets
# Default: None (auto source ip address)
# src_ip=2.2.2.2
# server - set ip address, tcp port, secret string and timeout for TACACS+ servers
# Default: None (no TACACS+ server)
# server=1.1.1.1:49,secret=test,timeout=3
# user_priv - set the map between TACACS+ user privilege and local user's passwd
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash
# many_to_one - create one local user for many TACACS+ users which have the same privilege
# Default: many_to_one=n
# many_to_one=y

View File

@ -1,21 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-auth - authentication settings common to all services
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok try_first_pass
#
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
auth required pam_permit.so
# and here are more per-package modules (the "Additional" block)

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth-sonic
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,55 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2
# RADIUS NSS Configuration File
#
# Debug: on|off|trace
# Default: off
#
# debug=on
#
# User Privilege:
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# Eg:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell
#
# many_to_one:
# y: Map RADIUS users to one local user per privilege.
# n: Create local user account on first successful authentication.
# Default: n
#
# Eg:
# many_to_one=y
#
# unconfirmed_disallow:
# y: Do not allow unconfirmed users (users created before authentication)
# n: Allow unconfirmed users.
# Default: n
# Eg:
# unconfirmed_disallow=y
#
# unconfirmed_ageout:
# <seconds>: Wait time before purging unconfirmed users
# Default: 600
#
# Eg:
# unconfirmed_ageout=900
#
# unconfirmed_regexp:
# <regexp>: The RE to match the command line of processes for which the
# creation of unconfirmed users is to be allowed.
# Default: (.*: <user> \[priv\])|(.*: \[accepted\])
# where: <user> is the unconfirmed user.
#

View File

@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth-sonic
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password

View File

@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password

View File

@ -1,41 +0,0 @@
# Configuration for libnss-tacplus
# debug - If you want to enable the debug log, set it to on
# Default: off
# debug=on
# local_accounting - If you want local accounting, set it
# Default: None
# local_accounting
# tacacs_accounting - If you want tacacs+ accounting, set it
# Default: None
# tacacs_accounting
# local_authorization - If you want local authorization, set it
# Default: None
# local_authorization
local_authorization
# tacacs_authorization - If you want tacacs+ authorization, set it
# Default: None
# tacacs_authorization
# src_ip - set source address of TACACS+ protocol packets
# Default: None (auto source ip address)
# src_ip=2.2.2.2
# server - set ip address, tcp port, secret string and timeout for TACACS+ servers
# Default: None (no TACACS+ server)
# server=1.1.1.1:49,secret=test,timeout=3
server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default
server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt
# user_priv - set the map between TACACS+ user privilege and local user's passwd
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash
# many_to_one - create one local user for many TACACS+ users which have the same privilege
# Default: many_to_one=n
# many_to_one=y
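The server= lines above pack address, port and comma-separated key=value options (secret, timeout, and optionally vrf) into one value. A small sketch of splitting such a line; the helper name is made up for the example:

# Illustration only: split a libnss-tacplus "server=" value such as
# "192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default" into its parts.
def parse_tacplus_server(value):
    fields = value.split(",")
    host, _, port = fields[0].partition(":")
    options = dict(item.split("=", 1) for item in fields[1:])
    return {"host": host,
            "port": int(port) if port else None,
            "secret": options.get("secret"),
            "timeout": int(options["timeout"]) if "timeout" in options else None,
            "vrf": options.get("vrf")}

# parse_tacplus_server("192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default")
# -> {'host': '192.168.1.1', 'port': 50, 'secret': 'dellsonic',
#     'timeout': 10, 'vrf': 'default'}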

View File

@ -1,21 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-auth - authentication settings common to all services
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok try_first_pass
#
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
auth required pam_permit.so
# and here are more per-package modules (the "Additional" block)

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth-sonic
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password

View File

@ -1,55 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2
# RADIUS NSS Configuration File
#
# Debug: on|off|trace
# Default: off
#
# debug=on
#
# User Privilege:
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# Eg:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell
#
# many_to_one:
# y: Map RADIUS users to one local user per privilege.
# n: Create local user account on first successful authentication.
# Default: n
#
# Eg:
# many_to_one=y
#
# unconfirmed_disallow:
# y: Do not allow unconfirmed users (users created before authentication)
# n: Allow unconfirmed users.
# Default: n
# Eg:
# unconfirmed_disallow=y
#
# unconfirmed_ageout:
# <seconds>: Wait time before purging unconfirmed users
# Default: 600
#
# Eg:
# unconfirmed_ageout=900
#
# unconfirmed_regexp:
# <regexp>: The RE to match the command line of processes for which the
# creation of unconfirmed users is to be allowed.
# Default: (.*: <user> \[priv\])|(.*: \[accepted\])
# where: <user> is the unconfirmed user.
#

View File

@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth-sonic
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password


@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password


@ -1,42 +0,0 @@
# Configuration for libnss-tacplus
# debug - If you want to enable the debug log, set it on
# Default: off
# debug=on
# local_accounting - If you want local accounting, set it
# Default: None
# local_accounting
local_accounting
# tacacs_accounting - If you want tacacs+ accounting, set it
# Default: None
# tacacs_accounting
# local_authorization - If you want local authorization, set it
# Default: None
# local_authorization
local_authorization
# tacacs_authorization - If you want tacacs+ authorization, set it
# Default: None
# tacacs_authorization
# src_ip - set source address of TACACS+ protocol packets
# Default: None (auto source ip address)
# src_ip=2.2.2.2
# server - set ip address, tcp port, secret string and timeout for TACACS+ servers
# Default: None (no TACACS+ server)
# server=1.1.1.1:49,secret=test,timeout=3
server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default
server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt
# user_priv - set the map between TACACS+ user privilege and local user's passwd
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash
# many_to_one - create one local user for many TACACS+ users that share the same privilege
# Default: many_to_one=n
# many_to_one=y
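Each server= entry above combines the address, TCP port, and per-server options in a single comma-separated value. A rough sketch of splitting such an entry (illustrative Python only; the real libnss-tacplus parser is part of the compiled plugin and may behave differently):

def parse_tacplus_server(entry):
    """Parse a line like 'server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default'."""
    _, _, value = entry.partition('=')          # drop the leading 'server='
    addr, *options = value.split(',')
    host, _, port = addr.partition(':')
    server = {'host': host, 'port': int(port) if port else 49}  # 49 is the usual TACACS+ port
    for opt in options:
        key, _, val = opt.partition('=')
        server[key] = val
    return server

# parse_tacplus_server('server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default')
# -> {'host': '192.168.1.1', 'port': 50, 'secret': 'dellsonic', 'timeout': '10', 'vrf': 'default'}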


@ -1,21 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-auth - authentication settings common to all services
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok try_first_pass
#
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
auth required pam_permit.so
# and here are more per-package modules (the "Additional" block)
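The [success=1 default=ignore] control on pam_unix above means a successful pam_unix result jumps over the next module (pam_deny) and lands on pam_permit, while any other result falls through to pam_deny. A toy sketch of that skip logic in Python (illustration only; this is not how libpam itself is implemented):

def run_auth_stack(unix_ok):
    # (name, modules-to-skip-on-success) pairs mirroring the stack above:
    #   pam_unix   [success=1 default=ignore] -> skip the next module on success
    #   pam_deny   requisite                  -> fail immediately
    #   pam_permit required                   -> succeed
    stack = [('pam_unix', 1), ('pam_deny', None), ('pam_permit', None)]
    i = 0
    result = None
    while i < len(stack):
        name, skip = stack[i]
        if name == 'pam_unix' and unix_ok and skip:
            i += skip + 1            # jump over pam_deny
            continue
        if name == 'pam_deny':
            return 'denied'
        if name == 'pam_permit':
            result = 'permitted'
        i += 1
    return result

# run_auth_stack(True)  -> 'permitted'  (pam_unix succeeded, pam_deny skipped)
# run_auth_stack(False) -> 'denied'     (fell through to pam_deny)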


@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth-sonic
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password


@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password


@ -1,55 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2
# RADIUS NSS Configuration File
#
# Debug: on|off|trace
# Default: off
#
# debug=on
#
# User Privilege:
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# Eg:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell
#
# many_to_one:
# y: Map RADIUS users to one local user per privilege.
# n: Create local user account on first successful authentication.
# Default: n
#
# Eg:
# many_to_one=y
#
# unconfirmed_disallow:
# y: Do not allow unconfirmed users (users created before authentication)
# n: Allow unconfirmed users.
# Default: n
# Eg:
# unconfirmed_disallow=y
#
# unconfirmed_ageout:
# <seconds>: Wait time before purging unconfirmed users
# Default: 600
#
# Eg:
# unconfirmed_ageout=900
#
# unconfirmed_regexp:
# <regexp>: The RE to match the command line of processes for which the
# creation of unconfirmed users is to be allowed.
# Default: (.*: <user> \[priv\])|(.*: \[accepted\])
# where: <user> is the unconfirmed user.
#


@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth-sonic
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password


@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password


@ -1,44 +0,0 @@
# Configuration for libnss-tacplus
# debug - If you want to enable the debug log, set it on
# Default: off
# debug=on
# local_accounting - If you want local accounting, set it
# Default: None
# local_accounting
local_accounting
# tacacs_accounting - If you want tacacs+ accounting, set it
# Default: None
# tacacs_accounting
tacacs_accounting
# local_authorization - If you want local authorization, set it
# Default: None
# local_authorization
local_authorization
# tacacs_authorization - If you want tacacs+ authorization, set it
# Default: None
# tacacs_authorization
tacacs_authorization
# src_ip - set source address of TACACS+ protocol packets
# Default: None (auto source ip address)
# src_ip=2.2.2.2
# server - set ip address, tcp port, secret string and timeout for TACACS+ servers
# Default: None (no TACACS+ server)
# server=1.1.1.1:49,secret=test,timeout=3
server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default
server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt
# user_priv - set the map between TACACS+ user privilege and local user's passwd
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash
# many_to_one - create one local user for many TACACS+ users that share the same privilege
# Default: many_to_one=n
# many_to_one=y


@ -1,21 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
#
# /etc/pam.d/common-auth - authentication settings common to all services
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok try_first_pass
#
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
auth required pam_permit.so
# and here are more per-package modules (the "Additional" block)


@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth-sonic
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password


@ -1,116 +0,0 @@
#
# The PAM configuration file for the Shadow `login' service
#
# Enforce a minimal delay in case of failure (in microseconds).
# (Replaces the `FAIL_DELAY' setting from login.defs)
# Note that other modules may require another minimal delay. (for example,
# to disable any delay, you should add the nodelay option to pam_unix)
auth optional pam_faildelay.so delay=3000000
# Outputs an issue file prior to each login prompt (Replaces the
# ISSUE_FILE option from login.defs). Uncomment for use
# auth required pam_issue.so issue=/etc/issue
# Disallows root logins except on tty's listed in /etc/securetty
# (Replaces the `CONSOLE' setting from login.defs)
#
# With the default control of this module:
# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
# root will not be prompted for a password on insecure lines.
# if an invalid username is entered, a password is prompted (but login
# will eventually be rejected)
#
# You can change it to a "requisite" module if you think root may mis-type
# her login and should not be prompted for a password in that case. But
# this will leave the system as vulnerable to user enumeration attacks.
#
# You can change it to a "required" module if you think it permits to
# guess valid user names of your system (invalid user names are considered
# as possibly being root on insecure lines), but root passwords may be
# communicated over insecure lines.
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
# Disallows other than root logins when /etc/nologin exists
# (Replaces the `NOLOGINS_FILE' option from login.defs)
auth requisite pam_nologin.so
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible
# that a module could execute code in the wrong domain.
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Sets the loginuid process attribute
session required pam_loginuid.so
# SELinux needs to intervene at login time to ensure that the process
# starts in the proper default security context. Only sessions which are
# intended to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# When the module is present, "required" would be sufficient (When SELinux
# is disabled, this returns success.)
# This module parses environment configuration file(s)
# and also allows you to use an extended config
# file /etc/security/pam_env.conf.
#
# parsing /etc/environment needs "readenv=1"
session required pam_env.so readenv=1
# locale variables are also kept into /etc/default/locale in etch
# reading this file *in addition to /etc/environment* does not hurt
session required pam_env.so readenv=1 envfile=/etc/default/locale
# Standard Un*x authentication.
@include common-auth
# This allows certain extra groups to be granted to a user
# based on things like time of day, tty, service, and user.
# Please edit /etc/security/group.conf to fit your needs
# (Replaces the `CONSOLE_GROUPS' option in login.defs)
auth optional pam_group.so
# Uncomment and edit /etc/security/time.conf if you need to set
# time restraint on logins.
# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
# as well as /etc/porttime)
# account requisite pam_time.so
# Uncomment and edit /etc/security/access.conf if you need to
# set access limits.
# (Replaces /etc/login.access file)
# account required pam_access.so
# Sets up user limits according to /etc/security/limits.conf
# (Replaces the use of /etc/limits in old login)
session required pam_limits.so
# Prints the last login info upon successful login
# (Replaces the `LASTLOG_ENAB' option from login.defs)
session optional pam_lastlog.so
# Prints the message of the day upon successful login.
# (Replaces the `MOTD_FILE' option in login.defs)
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Prints the status of the user's mailbox upon successful login
# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
#
# This also defines the MAIL environment variable
# However, userdel also needs MAIL_DIR and MAIL_FILE variables
# in /etc/login.defs to make sure that removing a user
# also removes the user's mail spool file.
# See comments in /etc/login.defs
session optional pam_mail.so standard
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x account and session
@include common-account
@include common-session
@include common-password


@ -1,55 +0,0 @@
#THIS IS AN AUTO-GENERATED FILE
# Generated from: /usr/share/sonic/templates/radius_nss.conf.j2
# RADIUS NSS Configuration File
#
# Debug: on|off|trace
# Default: off
#
# debug=on
#
# User Privilege:
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# Eg:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/usr/bin/sonic-launch-shell
# user_priv=7;pw_info=netops;gid=999;group=docker;shell=/usr/bin/sonic-launch-shell
# user_priv=1;pw_info=operator;gid=100;group=docker;shell=/usr/bin/sonic-launch-shell
#
# many_to_one:
# y: Map RADIUS users to one local user per privilege.
# n: Create local user account on first successful authentication.
# Default: n
#
# Eg:
# many_to_one=y
#
# unconfirmed_disallow:
# y: Do not allow unconfirmed users (users created before authentication)
# n: Allow unconfirmed users.
# Default: n
# Eg:
# unconfirmed_disallow=y
#
# unconfirmed_ageout:
# <seconds>: Wait time before purging unconfirmed users
# Default: 600
#
# Eg:
# unconfirmed_ageout=900
#
# unconfirmed_regexp:
# <regexp>: The RE to match the command line of processes for which the
# creation of unconfirmed users is to be allowed.
# Default: (.*: <user> \[priv\])|(.*: \[accepted\])
# where: <user> is the unconfirmed user.
#


@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth-sonic
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password


@ -1,55 +0,0 @@
# PAM configuration for the Secure Shell service
# Standard Un*x authentication.
@include common-auth
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password


@ -1,42 +0,0 @@
# Configuration for libnss-tacplus
# debug - If you want to enable the debug log, set it on
# Default: off
# debug=on
# local_accounting - If you want local accounting, set it
# Default: None
# local_accounting
# tacacs_accounting - If you want tacacs+ accounting, set it
# Default: None
# tacacs_accounting
tacacs_accounting
# local_authorization - If you want local authorization, set it
# Default: None
# local_authorization
# tacacs_authorization - If you want tacacs+ authorization, set it
# Default: None
# tacacs_authorization
tacacs_authorization
# src_ip - set source address of TACACS+ protocol packets
# Default: None (auto source ip address)
# src_ip=2.2.2.2
# server - set ip address, tcp port, secret string and timeout for TACACS+ servers
# Default: None (no TACACS+ server)
# server=1.1.1.1:49,secret=test,timeout=3
server=192.168.1.1:50,secret=dellsonic,timeout=10,vrf=default
server=192.168.1.2:51,secret=dellsonic1,timeout=15,vrf=mgmt
# user_priv - set the map between TACACS+ user privilege and local user's passwd
# Default:
# user_priv=15;pw_info=remote_user_su;gid=1000;group=sudo,docker;shell=/bin/bash
# user_priv=1;pw_info=remote_user;gid=999;group=docker;shell=/bin/bash
# many_to_one - create one local user for many TACACS+ users that share the same privilege
# Default: many_to_one=n
# many_to_one=y


@ -1,244 +0,0 @@
"""
hostcfgd test password hardening vector
"""
HOSTCFGD_TEST_PASSWH_VECTOR = [
[
"PASSWORD_HARDENING",
{
"default_values":{
"PASSW_HARDENING": {
"POLICIES":{
"state": "disabled",
"expiration": "180",
"expiration_warning": "15",
"history_cnt": "10",
"len_min": "8",
"reject_user_passw_match": "True",
"lower_class": "True",
"upper_class": "True",
"digits_class": "True",
"special_class": "True"
}
},
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
}
},
"enable_feature":{
"PASSW_HARDENING": {
"POLICIES":{
"state": "enabled",
"expiration": "180",
"expiration_warning": "15",
"history_cnt": "10",
"len_min": "8",
"reject_user_passw_match": "True",
"lower_class": "True",
"upper_class": "True",
"digits_class": "True",
"special_class": "True"
}
},
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
}
},
"enable_digits_class":{
"PASSW_HARDENING": {
"POLICIES":{
"state": "enabled",
"expiration": "0",
"expiration_warning": "0",
"history_cnt": "0",
"len_min": "8",
"reject_user_passw_match": "False",
"lower_class": "False",
"upper_class": "False",
"digits_class": "True",
"special_class": "False"
}
},
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
}
},
"enable_lower_class":{
"PASSW_HARDENING": {
"POLICIES":{
"state": "enabled",
"expiration": "0",
"expiration_warning": "0",
"history_cnt": "0",
"len_min": "8",
"reject_user_passw_match": "False",
"lower_class": "True",
"upper_class": "False",
"digits_class": "False",
"special_class": "False"
}
},
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
},
"enable_upper_class":{
"PASSW_HARDENING": {
"POLICIES":{
"state": "enabled",
"expiration": "0",
"expiration_warning": "0",
"history_cnt": "0",
"len_min": "8",
"reject_user_passw_match": "False",
"lower_class": "False",
"upper_class": "True",
"digits_class": "False",
"special_class": "False"
}
},
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
}
},
"enable_special_class":{
"PASSW_HARDENING": {
"POLICIES":{
"state": "enabled",
"expiration": "0",
"expiration_warning": "0",
"history_cnt": "0",
"len_min": "8",
"reject_user_passw_match": "False",
"lower_class": "False",
"upper_class": "False",
"digits_class": "False",
"special_class": "True"
}
},
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
}
}
}
]
]
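Each scenario key above ("default_values", "enable_feature", and so on) maps to a complete CONFIG_DB snapshot, so a test can be parameterized over them. A minimal sketch of that pattern (the import path and the assertions are illustrative; the real hostcfgd tests exercise the password-hardening handler itself):

import pytest

# Illustrative import path: the vector normally sits next to the hostcfgd tests.
from hostcfgd_test_passwh_vector import HOSTCFGD_TEST_PASSWH_VECTOR

SCENARIOS = sorted(HOSTCFGD_TEST_PASSWH_VECTOR[0][1].items())

@pytest.mark.parametrize('name, config_db', SCENARIOS)
def test_passw_hardening_scenario_shape(name, config_db):
    # Every scenario ships a POLICIES table plus the DEVICE_METADATA/FEATURE/KDUMP
    # boilerplate that hostcfgd expects on startup.
    policies = config_db['PASSW_HARDENING']['POLICIES']
    assert policies['state'] in ('enabled', 'disabled')
    assert int(policies['len_min']) > 0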


@ -1,181 +0,0 @@
"""
hostcfgd test radius vector
"""
from unittest.mock import call
HOSTCFGD_TEST_RADIUS_VECTOR = [
[
"RADIUS",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"AAA": {
"authentication": {
"login": "radius,local",
"debug": "True",
}
},
"RADIUS": {
"global": {
"nas_ip": "10.10.10.10",
"auth_port": "1645",
"auth_type": "mschapv2",
"retransmit": "2",
"timeout": "3",
"passkey": "pass",
}
},
"RADIUS_SERVER": {
"10.10.10.1": {
"auth_type": "pap",
"retransmit": "1",
"timeout": "1",
"passkey": "pass1",
},
"10.10.10.2": {
"auth_type": "chap",
"retransmit": "2",
"timeout": "2",
"passkey": "pass2",
}
},
},
"expected_config_db": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"AAA": {
"authentication": {
"login": "radius,local",
"debug": "True",
}
},
"RADIUS": {
"global": {
"nas_ip": "10.10.10.10",
"auth_port": "1645",
"auth_type": "mschapv2",
"retransmit": "2",
"timeout": "3",
"passkey": "pass",
}
},
"RADIUS_SERVER": {
"10.10.10.1": {
"auth_type": "pap",
"retransmit": "1",
"timeout": "1",
"passkey": "pass1",
},
"10.10.10.2": {
"auth_type": "chap",
"retransmit": "2",
"timeout": "2",
"passkey": "pass2",
}
},
},
"expected_subprocess_calls": [
call("service aaastatsd start", shell=True),
],
}
],
[
"LOCAL",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "local",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"AAA": {
"authentication": {
"login": "local",
"debug": "True",
}
},
},
"expected_config_db": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "local",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"AAA": {
"authentication": {
"login": "local",
"debug": "True",
}
},
},
"expected_subprocess_calls": [
call("service aaastatsd start", shell=True),
],
},
],
]
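The expected_subprocess_calls lists above hold unittest.mock.call objects, so a test only needs to patch the process-spawning function and compare the recorded calls. A minimal, self-contained sketch (patching subprocess.check_call is an assumption made for illustration; hostcfgd's own helper may differ):

import subprocess
from unittest import mock

expected_subprocess_calls = [
    mock.call("service aaastatsd start", shell=True),
]

with mock.patch('subprocess.check_call') as mocked_call:
    # Stand-in for the code under test: applying the AAA/RADIUS configuration
    # is expected to (re)start aaastatsd.
    subprocess.check_call("service aaastatsd start", shell=True)

mocked_call.assert_has_calls(expected_subprocess_calls)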


@ -1,260 +0,0 @@
"""
hostcfgd test tacacs vector
"""
from unittest.mock import call
HOSTCFGD_TEST_TACACS_VECTOR = [
[
"TACACS",
{
"config_db_local": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"AAA": {
"authentication": {
"login": "local"
},
"authorization": {
"login": "local"
},
"accounting": {
"login": "local"
}
},
"TACPLUS": {
"global": {
"auth_type": "chap",
"timeout": 5,
"passkey": "dellsonic",
"src_intf": "Ethernet0"
}
},
"TACPLUS_SERVER": {
"192.168.1.1" : {
"priority": 5,
"tcp_port": 50,
"timeout": 10,
"auth_type": "chap",
"passkey": "dellsonic",
"vrf": "default"
},
"192.168.1.2" : {
"priority": 2,
"tcp_port": 51,
"timeout": 15,
"auth_type": "pap",
"passkey": "dellsonic1",
"vrf": "mgmt"
}
},
},
"config_db_tacacs": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"AAA": {
"authentication": {
"login": "local"
},
"authorization": {
"login": "tacacs+"
},
"accounting": {
"login": "tacacs+"
}
},
"TACPLUS": {
"global": {
"auth_type": "chap",
"timeout": 5,
"passkey": "dellsonic",
"src_intf": "Ethernet0"
}
},
"TACPLUS_SERVER": {
"192.168.1.1" : {
"priority": 5,
"tcp_port": 50,
"timeout": 10,
"auth_type": "chap",
"passkey": "dellsonic",
"vrf": "default"
},
"192.168.1.2" : {
"priority": 2,
"tcp_port": 51,
"timeout": 15,
"auth_type": "pap",
"passkey": "dellsonic1",
"vrf": "mgmt"
}
},
},
"config_db_local_and_tacacs": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"AAA": {
"authentication": {
"login": "local"
},
"authorization": {
"login": "tacacs+ local"
},
"accounting": {
"login": "tacacs+ local"
}
},
"TACPLUS": {
"global": {
"auth_type": "chap",
"timeout": 5,
"passkey": "dellsonic",
"src_intf": "Ethernet0"
}
},
"TACPLUS_SERVER": {
"192.168.1.1" : {
"priority": 5,
"tcp_port": 50,
"timeout": 10,
"auth_type": "chap",
"passkey": "dellsonic",
"vrf": "default"
},
"192.168.1.2" : {
"priority": 2,
"tcp_port": 51,
"timeout": 15,
"auth_type": "pap",
"passkey": "dellsonic1",
"vrf": "mgmt"
}
},
},
"config_db_disable_accounting": {
"DEVICE_METADATA": {
"localhost": {
"hostname": "radius",
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"AAA": {
"authentication": {
"login": "local"
},
"authorization": {
"login": "local"
},
"accounting": {
"login": "disable"
}
},
"TACPLUS": {
"global": {
"auth_type": "chap",
"timeout": 5,
"passkey": "dellsonic",
"src_intf": "Ethernet0"
}
},
"TACPLUS_SERVER": {
"192.168.1.1" : {
"priority": 5,
"tcp_port": 50,
"timeout": 10,
"auth_type": "chap",
"passkey": "dellsonic",
"vrf": "default"
},
"192.168.1.2" : {
"priority": 2,
"tcp_port": 51,
"timeout": 15,
"auth_type": "pap",
"passkey": "dellsonic1",
"vrf": "mgmt"
}
},
}
}
]
]
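The four snapshots above differ only in the AAA authorization and accounting settings; that is the knob the TACACS test flips between runs. A small helper that surfaces exactly which AAA fields changed (an editor's illustration, not part of the test suite):

def aaa_login_diff(before, after):
    """Return {section: (old, new)} for AAA 'login' values that differ."""
    changed = {}
    for section in ('authentication', 'authorization', 'accounting'):
        old = before['AAA'].get(section, {}).get('login')
        new = after['AAA'].get(section, {}).get('login')
        if old != new:
            changed[section] = (old, new)
    return changed

# Moving from "config_db_local" to "config_db_tacacs" above yields:
#   {'authorization': ('local', 'tacacs+'), 'accounting': ('local', 'tacacs+')}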


@ -1,567 +0,0 @@
"""
hostcfgd test vector
"""
from unittest.mock import call
HOSTCFGD_TEST_VECTOR = [
[
"DualTorCase",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"expected_config_db": {
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "enabled"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"enable_feature_subprocess_calls": [
call("sudo systemctl unmask dhcp_relay.service", shell=True),
call("sudo systemctl enable dhcp_relay.service", shell=True),
call("sudo systemctl start dhcp_relay.service", shell=True),
call("sudo systemctl unmask mux.service", shell=True),
call("sudo systemctl enable mux.service", shell=True),
call("sudo systemctl start mux.service", shell=True),
call("sudo systemctl unmask telemetry.service", shell=True),
call("sudo systemctl unmask telemetry.timer", shell=True),
call("sudo systemctl enable telemetry.timer", shell=True),
call("sudo systemctl start telemetry.timer", shell=True),
],
"daemon_reload_subprocess_call": [
call("sudo systemctl daemon-reload", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error')
},
},
],
[
"SingleToRCase",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"type": "ToR",
}
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
"sflow": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "always_enabled"
},
},
},
"expected_config_db": {
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "disabled"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "always_disabled"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
"sflow": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "always_enabled"
},
},
},
"enable_feature_subprocess_calls": [
call("sudo systemctl stop mux.service", shell=True),
call("sudo systemctl disable mux.service", shell=True),
call("sudo systemctl mask mux.service", shell=True),
call("sudo systemctl unmask telemetry.service", shell=True),
call("sudo systemctl unmask telemetry.timer", shell=True),
call("sudo systemctl enable telemetry.timer", shell=True),
call("sudo systemctl start telemetry.timer", shell=True),
call("sudo systemctl unmask sflow.service", shell=True),
call("sudo systemctl enable sflow.service", shell=True),
call("sudo systemctl start sflow.service", shell=True),
],
"daemon_reload_subprocess_call": [
call("sudo systemctl daemon-reload", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error')
},
},
],
[
"T1Case",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"type": "T1",
}
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"expected_config_db": {
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "disabled"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "always_disabled"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"enable_feature_subprocess_calls": [
call("sudo systemctl stop mux.service", shell=True),
call("sudo systemctl disable mux.service", shell=True),
call("sudo systemctl mask mux.service", shell=True),
call("sudo systemctl unmask telemetry.service", shell=True),
call("sudo systemctl unmask telemetry.timer", shell=True),
call("sudo systemctl enable telemetry.timer", shell=True),
call("sudo systemctl start telemetry.timer", shell=True),
],
"daemon_reload_subprocess_call": [
call("sudo systemctl daemon-reload", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error')
},
},
],
[
"SingleToRCase_DHCP_Relay_Enabled",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"type": "ToR",
}
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"expected_config_db": {
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "always_disabled"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"enable_feature_subprocess_calls": [
call("sudo systemctl unmask dhcp_relay.service", shell=True),
call("sudo systemctl enable dhcp_relay.service", shell=True),
call("sudo systemctl start dhcp_relay.service", shell=True),
call("sudo systemctl stop mux.service", shell=True),
call("sudo systemctl disable mux.service", shell=True),
call("sudo systemctl mask mux.service", shell=True),
call("sudo systemctl unmask telemetry.service", shell=True),
call("sudo systemctl unmask telemetry.timer", shell=True),
call("sudo systemctl enable telemetry.timer", shell=True),
call("sudo systemctl start telemetry.timer", shell=True),
],
"daemon_reload_subprocess_call": [
call("sudo systemctl daemon-reload", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('output', 'error')
},
},
],
[
"DualTorCaseWithNoSystemCalls",
{
"config_db": {
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
},
"KDUMP": {
"config": {
"enabled": "false",
"num_dumps": "3",
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M"
}
},
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"expected_config_db": {
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "enabled"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
},
"enable_feature_subprocess_calls": [],
"daemon_reload_subprocess_call": [
call("sudo systemctl daemon-reload", shell=True),
],
"popen_attributes": {
'communicate.return_value': ('enabled', 'error')
},
}
]
]
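
# The templated "state" values in the FEATURE tables above are Jinja2
# expressions; the expected_config_db entries show them already resolved
# against DEVICE_METADATA. A minimal sketch (not the hostcfgd rendering code)
# of how one such template resolves:
def _example_render_state_template():
    from jinja2 import Template

    state_tmpl = ("{% if 'subtype' in DEVICE_METADATA['localhost'] and "
                  "DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}"
                  "enabled{% else %}always_disabled{% endif %}")
    rendered = Template(state_tmpl).render(
        DEVICE_METADATA={"localhost": {"subtype": "DualToR", "type": "ToRRouter"}})
    assert rendered == "enabled"
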
HOSTCFG_DAEMON_CFG_DB = {
"FEATURE": {
"dhcp_relay": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] != 'ToRRouter') %}enabled{% else %}disabled{% endif %}"
},
"mux": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "False",
"high_mem_alert": "disabled",
"set_owner": "local",
"state": "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}"
},
"telemetry": {
"auto_restart": "enabled",
"has_global_scope": "True",
"has_per_asic_scope": "False",
"has_timer": "True",
"high_mem_alert": "disabled",
"set_owner": "kube",
"state": "enabled",
"status": "enabled"
},
},
"KDUMP": {
"config": {
}
},
"NTP": {
"global": {
"vrf": "default",
"src_intf": "eth0;Loopback0"
}
},
"NTP_SERVER": {
"0.debian.pool.ntp.org": {}
},
"LOOPBACK_INTERFACE": {
"Loopback0|10.184.8.233/32": {
"scope": "global",
"family": "IPv4"
}
},
"DEVICE_METADATA": {
"localhost": {
"subtype": "DualToR",
"type": "ToRRouter",
}
}
}
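
# The "enable_feature_subprocess_calls" and "daemon_reload_subprocess_call"
# entries above are unittest.mock.call objects, and "popen_attributes" is the
# attribute spec typically applied to a mocked Popen (e.g. via
# configure_mock(**attrs)). An illustrative assertion helper follows; the
# parameter names are assumptions, not the actual hostcfgd test code:
def _example_assert_systemctl_calls(mocked_subprocess_call, test_data):
    # Every expected "systemctl ..." invocation must have been issued,
    # regardless of ordering between features.
    mocked_subprocess_call.assert_has_calls(
        test_data["enable_feature_subprocess_calls"], any_order=True)
    mocked_subprocess_call.assert_has_calls(
        test_data["daemon_reload_subprocess_call"], any_order=True)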

View File

@ -1,24 +0,0 @@
class MockConnector(object):
    STATE_DB = None
    data = {}

    def __init__(self, host):
        pass

    def connect(self, db_id):
        pass

    def get(self, db_id, key, field):
        return MockConnector.data[key][field]

    def keys(self, db_id, pattern):
        match = pattern.split('*')[0]
        ret = []
        for key in MockConnector.data.keys():
            if match in key:
                ret.append(key)
        return ret

    def get_all(self, db_id, key):
        return MockConnector.data[key]
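
# Illustrative only (not part of the original mock): how a test could seed and
# query this stand-in for swsscommon.SonicV2Connector. The table, key and
# field names below are made up for the example.
def _example_usage():
    MockConnector.data["EXAMPLE_TABLE|entry0"] = {"field": "value"}
    conn = MockConnector(host="localhost")
    conn.connect(MockConnector.STATE_DB)
    assert conn.get(MockConnector.STATE_DB, "EXAMPLE_TABLE|entry0", "field") == "value"
    assert conn.keys(MockConnector.STATE_DB, "EXAMPLE_TABLE|*") == ["EXAMPLE_TABLE|entry0"]
    assert conn.get_all(MockConnector.STATE_DB, "EXAMPLE_TABLE|entry0") == {"field": "value"}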

View File

@ -1,43 +0,0 @@
import sys
import os
import pytest
from swsscommon import swsscommon
from sonic_py_common.general import load_module_from_source
from .mock_connector import MockConnector
swsscommon.SonicV2Connector = MockConnector
test_path = os.path.dirname(os.path.abspath(__file__))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, modules_path)
# Load the file under test
procdockerstatsd_path = os.path.join(scripts_path, 'procdockerstatsd')
procdockerstatsd = load_module_from_source('procdockerstatsd', procdockerstatsd_path)
class TestProcDockerStatsDaemon(object):
    def test_convert_to_bytes(self):
        test_data = [
            ('1B', 1),
            ('500B', 500),
            ('1KB', 1000),
            ('500KB', 500000),
            ('1MB', 1000000),
            ('500MB', 500000000),
            ('1MiB', 1048576),
            ('500MiB', 524288000),
            ('66.41MiB', 69635932),
            ('333.6MiB', 349804954),
            ('1GiB', 1073741824),
            ('500GiB', 536870912000),
            ('7.751GiB', 8322572878)
        ]

        pdstatsd = procdockerstatsd.ProcDockerStats(procdockerstatsd.SYSLOG_IDENTIFIER)

        for test_input, expected_output in test_data:
            res = pdstatsd.convert_to_bytes(test_input)
            assert res == expected_output
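
        # A minimal sketch (not the procdockerstatsd implementation) of a
        # converter meeting the same expectations: plain SI suffixes are powers
        # of 1000, the IEC "i" suffixes are powers of 1024, and the result is
        # rounded to the nearest whole byte.
        import re
        units = {'B': 1, 'KB': 10**3, 'MB': 10**6, 'GB': 10**9,
                 'KiB': 2**10, 'MiB': 2**20, 'GiB': 2**30}

        def convert_sketch(value):
            number, unit = re.match(r'([0-9.]+)([A-Za-z]+)', value).groups()
            return int(round(float(number) * units[unit]))

        for test_input, expected_output in test_data:
            assert convert_sketch(test_input) == expected_output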