[Python] Align files in root dir, dockers/ and files/ with PEP8 standards (#6109)

**- Why I did it**

Align style with slightly modified PEP8 standards (maximum line length extended to 120 characters). This also helps the transition to Python 3, which is stricter about whitespace, and it unifies style across the SONiC codebase. Other directories will be tackled in separate PRs.
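
As a quick illustration of the Python 3 whitespace point (the file name and commands below are illustrative, not part of this PR): Python 2 silently accepts a block that mixes tabs and spaces for indentation, while Python 3 rejects it with a hard error.

```sh
# A two-line body: one line indented with a tab, the next with spaces
printf 'if True:\n\tx = 1\n        y = 2\n' > mixed.py

python2 mixed.py   # runs; the tab is treated as equivalent indentation
python3 mixed.py   # fails: "TabError: inconsistent use of tabs and spaces in indentation"
```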

**- How I did it**

Using `autopep8 --in-place --max-line-length 120` and some manual tweaks.
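
The commit message records the flags but not the exact sweep; applied across the directories named in the title, it would presumably look something like this (`--recursive` is a real autopep8 option, but the invocation itself is a guess):

```sh
# Hypothetical directory-wide sweep; root-dir scripts would be passed individually.
autopep8 --in-place --max-line-length 120 --recursive dockers/ files/
```
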
Author: Joe LeVeque
Date: 2020-12-03 15:57:50 -08:00 (committed by GitHub)
Commit: 905a5127bb (parent 0e101247ac)
12 changed files with 102 additions and 75 deletions

**File 1 of 12**

@@ -65,5 +65,6 @@ def main():
     p.sendline('sync')
     p.expect([cmd_prompt])
 
+
 if __name__ == '__main__':
     main()

**File 2 of 12**

@@ -6,6 +6,7 @@ import subprocess
 import syslog
 from swsssdk import ConfigDBConnector
 
+
 class BGPConfigDaemon:
 
     def __init__(self):
@@ -15,17 +16,18 @@ class BGPConfigDaemon:
         self.bgp_neighbor = self.config_db.get_table('BGP_NEIGHBOR')
 
     def __run_command(self, command):
         # print command
         p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
         stdout = p.communicate()[0]
         p.wait()
         if p.returncode != 0:
-            syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(p.returncode, command, stdout))
+            syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(
+                p.returncode, command, stdout))
 
     def metadata_handler(self, key, data):
         if key == 'localhost' and data.has_key('bgp_asn'):
             if data['bgp_asn'] != self.bgp_asn:
-                syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(data['bgp_asn'], self.bgp_asn))
+                syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(
+                    data['bgp_asn'], self.bgp_asn))
                 self.__run_command("supervisorctl restart start.sh")
                 self.__run_command("service quagga restart")
                 self.bgp_asn = data['bgp_asn']
@@ -38,22 +40,25 @@ class BGPConfigDaemon:
             self.__run_command(command)
             self.bgp_neighbor.pop(key)
         else:
-            command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(self.bgp_asn, key, data['asn'])
+            command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(
+                self.bgp_asn, key, data['asn'])
            self.__run_command(command)
            if data.has_key('name'):
-                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(self.bgp_asn, key, data['name'])
+                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(
+                    self.bgp_asn, key, data['name'])
                self.__run_command(command)
            if data.has_key('admin_status'):
                command_mod = 'no ' if data['admin_status'] == 'up' else ''
-                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(self.bgp_asn, command_mod, key)
+                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(
+                    self.bgp_asn, command_mod, key)
                self.__run_command(command)
            self.bgp_neighbor[key] = data
 
     def start(self):
-        self.config_db.subscribe('BGP_NEIGHBOR',
-                lambda table, key, data: self.bgp_handler(key, data))
+        self.config_db.subscribe('BGP_NEIGHBOR',
+                                 lambda table, key, data: self.bgp_handler(key, data))
         self.config_db.subscribe('DEVICE_METADATA',
-                lambda table, key, data: self.metadata_handler(key, data))
+                                 lambda table, key, data: self.metadata_handler(key, data))
         self.config_db.listen()
@@ -61,5 +66,6 @@ def main():
     daemon = BGPConfigDaemon()
     daemon.start()
 
+
 if __name__ == "__main__":
     main()

**File 3 of 12**

@@ -81,9 +81,10 @@ class LldpManager(daemon_base.DaemonBase):
         else:
             # Retrieve PortInitDone entry from the Port table
             (init_status, init_fvp) = port_table.get("PortInitDone")
-            #The initialization procedure is done, but don't have this port entry
+            # The initialization procedure is done, but don't have this port entry
             if init_status:
-                self.log_error("Port '{}' not found in {} table in App DB".format(port_name, swsscommon.APP_PORT_TABLE_NAME))
+                self.log_error("Port '{}' not found in {} table in App DB".format(
+                    port_name, swsscommon.APP_PORT_TABLE_NAME))
                 return False
 
     def generate_pending_lldp_config_cmd_for_port(self, port_name):
@@ -105,12 +106,13 @@ class LldpManager(daemon_base.DaemonBase):
             if not port_alias:
                 self.log_info("Unable to retrieve port alias for port '{}'. Using port name instead.".format(port_name))
                 port_alias = port_name
 
-            # Get the port description. If None or empty string, we'll skip this configuration 
+            # Get the port description. If None or empty string, we'll skip this configuration
             port_desc = port_table_dict.get("description")
         else:
-            self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(port_name, swsscommon.CFG_PORT_TABLE_NAME))
+            self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(
+                port_name, swsscommon.CFG_PORT_TABLE_NAME))
             port_alias = port_name
 
         lldpcli_cmd = "lldpcli configure ports {0} lldp portidsubtype local {1}".format(port_name, port_alias)
@@ -233,6 +235,7 @@ class LldpManager(daemon_base.DaemonBase):
 # ============================= Functions =============================
 
+
 def main():
     # Instantiate a LldpManager object
     lldpmgr = LldpManager(SYSLOG_IDENTIFIER)
@@ -242,16 +245,19 @@ def main():
     lldpmgr.run()
 
+
 def run_cmd(self, cmd):
     proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     (stdout, stderr) = proc.communicate()
     return proc.returncode, stderr
 
+
 def check_timeout(self, start_time):
     if time.time() - start_time > PORT_INIT_TIMEOUT:
         self.log_error("Port init timeout reached ({} seconds), resuming lldpd...".format(PORT_INIT_TIMEOUT))
         return True
     return False
 
+
 if __name__ == "__main__":
     main()

**File 4 of 12**

@@ -23,7 +23,7 @@ SYSLOG_IDENTIFIER = os.path.basename(__file__)
 WARM_BOOT_FILE_DIR = '/var/warmboot/nat/'
 NAT_WARM_BOOT_FILE = 'nat_entries.dump'
 
-IP_PROTO_TCP = '6' 
+IP_PROTO_TCP = '6'
 
 MATCH_CONNTRACK_ENTRY = '^(\w+)\s+(\d+).*src=([\d.]+)\s+dst=([\d.]+)\s+sport=(\d+)\s+dport=(\d+).*src=([\d.]+)\s+dst=([\d.]+)\s+sport=(\d+)\s+dport=(\d+)'
@@ -38,8 +38,8 @@ def add_nat_conntrack_entry_in_kernel(ipproto, srcip, dstip, srcport, dstport, n
     if (ipproto == IP_PROTO_TCP):
         state = ' --state ESTABLISHED '
     ctcmd = 'conntrack -I -n ' + natdstip + ':' + natdstport + ' -g ' + natsrcip + ':' + natsrcport + \
-           ' --protonum ' + ipproto + state + ' --timeout 432000 --src ' + srcip + ' --sport ' + srcport + \
-           ' --dst ' + dstip + ' --dport ' + dstport + ' -u ASSURED'
+        ' --protonum ' + ipproto + state + ' --timeout 432000 --src ' + srcip + ' --sport ' + srcport + \
+        ' --dst ' + dstip + ' --dport ' + dstport + ' -u ASSURED'
     subprocess.call(ctcmd, shell=True)
     logger.log_info("Restored NAT entry: {}".format(ctcmd))
@@ -65,7 +65,7 @@ def restore_update_kernel_nat_entries(filename):
             cmdargs = list(ctline.pop(0))
             proto = cmdargs.pop(0)
             if proto not in ('tcp', 'udp'):
-               continue
+                continue
             add_nat_conntrack_entry_in_kernel(*cmdargs)
@@ -97,7 +97,7 @@ def main():
         sys.exit(1)
 
     # Remove the dump file after restoration
-    os.remove(WARM_BOOT_FILE_DIR + NAT_WARM_BOOT_FILE) 
+    os.remove(WARM_BOOT_FILE_DIR + NAT_WARM_BOOT_FILE)
 
     # set statedb to signal other processes like natsyncd
     set_statedb_nat_restore_done()

**File 5 of 12**

@@ -4,7 +4,7 @@ import time
 import swsssdk
 
 # ALPHA defines the size of the window over which we calculate the average value. ALPHA is 2/(N+1) where N is the interval(window size)
-# In this case we configure the window to be 10s. This way if we have a huge 1s spike in traffic, 
+# In this case we configure the window to be 10s. This way if we have a huge 1s spike in traffic,
 # the average rate value will show a curve descending from the spike to the usual rate over approximately 10s.
 DEFAULT_SMOOTH_INTERVAL = '10'
 DEFAULT_ALPHA = '0.18'
@@ -17,7 +17,7 @@ def enable_counter_group(db, name):
 
 def enable_rates():
-    # set the default interval for rates 
+    # set the default interval for rates
     counters_db = swsssdk.SonicV2Connector()
     counters_db.connect('COUNTERS_DB')
     counters_db.set('COUNTERS_DB', 'RATES:PORT', 'PORT_SMOOTH_INTERVAL', DEFAULT_SMOOTH_INTERVAL)
@@ -59,4 +59,3 @@ def main():
 
 if __name__ == '__main__':
     main()
-

**File 6 of 12**

@@ -14,6 +14,7 @@ SYSLOG_IDENTIFIER = 'port_index_mapper'
 logger = Logger(SYSLOG_IDENTIFIER)
 logger.set_min_log_priority_info()
 
+
 class PortIndexMapper(object):
 
     def __init__(self):
@@ -106,6 +107,7 @@ def main():
     port_mapper.populate()
     port_mapper.listen()
 
+
 if __name__ == '__main__':
     rc = 0
     try:

**File 7 of 12**

@@ -14,16 +14,19 @@ CONFIG_FILES = {
 
 OUTPUT_FILE = os.path.abspath('./asic_config_checksum')
 
+
 def log_info(msg):
     syslog.openlog(SYSLOG_IDENTIFIER)
     syslog.syslog(syslog.LOG_INFO, msg)
     syslog.closelog()
 
+
 def log_error(msg):
     syslog.openlog(SYSLOG_IDENTIFIER)
     syslog.syslog(syslog.LOG_ERR, msg)
     syslog.closelog()
 
+
 def get_config_files(config_file_map):
     '''
     Generates a list of absolute paths to ASIC config files.
@@ -34,11 +37,12 @@ def get_config_files(config_file_map):
             config_files.append(os.path.join(path, config_file))
     return config_files
 
+
 def generate_checksum(checksum_files):
     '''
     Generates a checksum for a given list of files. Returns None if an error
     occurs while reading the files.
 
     NOTE: The checksum is performed in the order provided. This function does
     NOT do any re-ordering of the files before creating the checksum.
     '''
@@ -54,6 +58,7 @@ def generate_checksum(checksum_files):
     return checksum.hexdigest()
 
+
 def main():
     config_files = sorted(get_config_files(CONFIG_FILES))
     checksum = generate_checksum(config_files)
@@ -63,5 +68,6 @@ def main():
     with open(OUTPUT_FILE, 'w') as output:
         output.write(checksum + '\n')
 
+
 if __name__ == '__main__':
     main()

**File 8 of 12**

@@ -31,7 +31,7 @@ cwd = []
 HOURS_4 = (4 * 60 * 60)
 PAUSE_ON_FAIL = (60 * 60)
 WAIT_FILE_WRITE1 = (10 * 60)
-WAIT_FILE_WRITE2= (5 * 60)
+WAIT_FILE_WRITE2 = (5 * 60)
 POLL_SLEEP = (60 * 60)
 MAX_RETRIES = 5
 UPLOAD_PREFIX = "UPLOADED_"
@@ -62,7 +62,7 @@ class config:
             while not os.path.exists(RC_FILE):
                 # Wait here until service restart
                 logger.log_error("Unable to retrieve Azure storage credentials")
-                time.sleep (HOURS_4)
+                time.sleep(HOURS_4)
 
         with open(RC_FILE, 'r') as f:
             self.parsed_data = json.load(f)
@@ -137,7 +137,7 @@ class Handler(FileSystemEventHandler):
             while True:
                 # Wait here until service restart
                 logger.log_error("Unable to retrieve Azure storage credentials")
-                time.sleep (HOURS_4)
+                time.sleep(HOURS_4)
 
         with open("/etc/sonic/sonic_version.yml", 'r') as stream:
             l = yaml.safe_load(stream)
@@ -163,11 +163,10 @@ class Handler(FileSystemEventHandler):
         elif event.event_type == 'created':
             # Take any action here when a file is first created.
-            logger.log_debug("Received create event - " + event.src_path)
-
+            logger.log_debug("Received create event - " + event.src_path)
             Handler.wait_for_file_write_complete(event.src_path)
             Handler.handle_file(event.src_path)
 
     @staticmethod
     def wait_for_file_write_complete(path):
         mtime = 0
@@ -186,8 +185,7 @@ class Handler(FileSystemEventHandler):
                 raise Exception("Dump file creation is too slow: " + path)
                 # Give up as something is terribly wrong with this file.
 
-        logger.log_debug("File write complete - " + path)
-
+        logger.log_debug("File write complete - " + path)
 
     @staticmethod
     def handle_file(path):
@@ -202,7 +200,7 @@ class Handler(FileSystemEventHandler):
         tarf_name = fname + ".tar.gz"
 
         cfg.get_core_info(path, hostname)
-
+
         tar = tarfile.open(tarf_name, "w:gz")
         for e in metafiles:
             tar.add(metafiles[e])
@@ -212,14 +210,14 @@ class Handler(FileSystemEventHandler):
         Handler.upload_file(tarf_name, tarf_name, path)
-        logger.log_debug("File uploaded - " + path)
+        logger.log_debug("File uploaded - " + path)
 
         os.chdir(INIT_CWD)
 
     @staticmethod
     def upload_file(fname, fpath, coref):
         daemonname = fname.split(".")[0]
         i = 0
 
         while True:
             try:
                 svc = FileService(account_name=acctname, account_key=acctkey)
@@ -239,13 +237,13 @@ class Handler(FileSystemEventHandler):
                 break
             except Exception as ex:
-                logger.log_error("core uploader failed: Failed during upload (" + coref + ") err: ("+ str(ex) +") retry:" + str(i))
+                logger.log_error("core uploader failed: Failed during upload (" +
+                                 coref + ") err: (" + str(ex) + ") retry:" + str(i))
                 if not os.path.exists(fpath):
                     break
                 i += 1
                 time.sleep(PAUSE_ON_FAIL)
 
     @staticmethod
     def scan():
         for e in os.listdir(CORE_FILE_PATH):
@@ -262,4 +260,3 @@ if __name__ == '__main__':
         w.run()
     except Exception as e:
         logger.log_err("core uploader failed: " + str(e) + " Exiting ...")
-

**File 9 of 12**

@@ -44,6 +44,7 @@ g_thread_exit_event = threading.Event()
 g_service = []
 g_dep_services = []
 
+
 def wait_for_container(docker_client, container_name):
     while True:
         while docker_client.inspect_container(container_name)['State']['Status'] != "running":
@@ -56,26 +57,27 @@ def wait_for_container(docker_client, container_name):
         # If this is a dependent service and warm restart is enabled for the system/container,
         # OR if the system is going through a fast-reboot, DON'T signal main thread to exit
         if (container_name in g_dep_services and
-            (device_info.is_warm_restart_enabled(container_name) or device_info.is_fast_reboot_enabled())):
+                (device_info.is_warm_restart_enabled(container_name) or device_info.is_fast_reboot_enabled())):
             continue
 
         # Signal the main thread to exit
         g_thread_exit_event.set()
 
+
 def main():
     thread_list = []
 
     docker_client = APIClient(base_url='unix://var/run/docker.sock')
 
-    parser = argparse.ArgumentParser(description='Wait for dependent docker services',
-                                    formatter_class=argparse.RawTextHelpFormatter,
-                                    epilog="""
+    parser = argparse.ArgumentParser(description='Wait for dependent docker services',
+                                     formatter_class=argparse.RawTextHelpFormatter,
+                                     epilog="""
 Examples:
     docker-wait-any -s swss -d syncd teamd
 """)
 
-    parser.add_argument('-s','--service', nargs='+', default=None, help='name of the service')
-    parser.add_argument('-d','--dependent', nargs='*', default=None, help='other dependent services')
+    parser.add_argument('-s', '--service', nargs='+', default=None, help='name of the service')
+    parser.add_argument('-d', '--dependent', nargs='*', default=None, help='other dependent services')
     args = parser.parse_args()
 
     global g_service
@@ -102,5 +104,6 @@ Examples:
     g_thread_exit_event.wait()
     sys.exit(0)
 
+
 if __name__ == '__main__':
     main()

**File 10 of 12**

@@ -10,6 +10,7 @@ SYSLOG_IDENTIFIER = 'core_cleanup.py'
 CORE_FILE_DIR = '/var/core/'
 MAX_CORE_FILES = 4
 
+
 def main():
     logger = Logger(SYSLOG_IDENTIFIER)
     logger.set_min_log_priority_info()
@@ -28,7 +29,7 @@ def main():
             curr_files.append(f)
 
             if len(curr_files) > MAX_CORE_FILES:
-                curr_files.sort(reverse = True, key = lambda x: datetime.utcfromtimestamp(int(x.split('.')[1])))
+                curr_files.sort(reverse=True, key=lambda x: datetime.utcfromtimestamp(int(x.split('.')[1])))
                 oldest_core = curr_files[MAX_CORE_FILES]
                 logger.log_info('Deleting {}'.format(oldest_core))
                 try:
@@ -39,5 +40,6 @@ def main():
     logger.log_info('Finished cleaning up core files')
 
+
 if __name__ == '__main__':
     main()

**File 11 of 12**

@@ -21,6 +21,8 @@ CRITICAL_PROCESSES_FILE = '/etc/supervisor/critical_processes'
 FEATURE_TABLE_NAME = 'FEATURE'
 
+
+
 # Read the critical processes/group names from CRITICAL_PROCESSES_FILE
 def get_critical_group_and_process_list():
     critical_group_list = []
     critical_process_list = []
@@ -29,7 +31,8 @@ def get_critical_group_and_process_list():
         for line in file:
             line_info = line.strip(' \n').split(':')
             if len(line_info) != 2:
-                syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
+                syslog.syslog(syslog.LOG_ERR,
+                              "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
                 sys.exit(5)
 
             identifier_key = line_info[0].strip()
@@ -39,11 +42,13 @@ def get_critical_group_and_process_list():
             elif identifier_key == "program" and identifier_value:
                 critical_process_list.append(identifier_value)
             else:
-                syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
-                sys.exit(6)
+                syslog.syslog(syslog.LOG_ERR,
+                              "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
+                sys.exit(6)
 
     return critical_group_list, critical_process_list
 
+
 def main(argv):
     container_name = None
     opts, args = getopt.getopt(argv, "c:", ["container-name="])
@@ -90,13 +95,14 @@ def main(argv):
             restart_feature = features_table[container_name].get('auto_restart')
             if not restart_feature:
-                syslog.syslog(syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
+                syslog.syslog(
+                    syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
                 sys.exit(4)
 
             # If auto-restart feature is not disabled and at the same time
             # a critical process exited unexpectedly, terminate supervisor
             if (restart_feature != 'disabled' and expected == 0 and
-                (processname in critical_process_list or groupname in critical_group_list)):
+                    (processname in critical_process_list or groupname in critical_group_list)):
                 MSG_FORMAT_STR = "Process {} exited unxepectedly. Terminating supervisor..."
                 msg = MSG_FORMAT_STR.format(payload_headers['processname'])
                 syslog.syslog(syslog.LOG_INFO, msg)

**File 12 of 12**

@@ -11,59 +11,58 @@ chassis_db = 'CHASSIS_APP_DB'
 
 def main():
-    parser = argparse.ArgumentParser(description=
-                                     "Update chassis_db config from database-config.json")
+    parser = argparse.ArgumentParser(description="Update chassis_db config from database-config.json")
     parser.add_argument("-j", "--json", help="databse-config json file", nargs='?',
-                       const=database_config_file)
-    parser.add_argument("-p", "--port", help="update port number", nargs='?' )
+                        const=database_config_file)
+    parser.add_argument("-p", "--port", help="update port number", nargs='?')
     group = parser.add_mutually_exclusive_group()
-    group.add_argument("-k", "--keep", help="keep configuration", action='store_true' )
-    group.add_argument("-d", "--delete", help="delete configuration", action='store_true' )
+    group.add_argument("-k", "--keep", help="keep configuration", action='store_true')
+    group.add_argument("-d", "--delete", help="delete configuration", action='store_true')
     args = parser.parse_args()
     jsonfile = ""
     if args.json != None:
        jsonfile = args.json
     else:
-       return
+        return
     if args.port != None:
        port_number = args.port
     else:
       port_number = ""
     if args.keep:
-       keep_config = True
+        keep_config = True
     else:
-       keep_config = False
+        keep_config = False
     if args.delete:
-       delete_config = True
+        delete_config = True
     else:
-       delete_config = False
+        delete_config = False
     data = {}
     data_keep = {}
     if os.path.isfile(jsonfile):
         with open(jsonfile, "r") as read_file:
             data = json.load(read_file)
     else:
-       syslog.syslog(syslog.LOG_ERR,
-                     'config file {} does notexist'.format(jsonfile))
-       return
+        syslog.syslog(syslog.LOG_ERR,
+                      'config file {} does notexist'.format(jsonfile))
+        return
     if 'INSTANCES' in data and redis_chassis in data['INSTANCES']:
-       data_keep['INSTANCES'] = {}
-       data_keep['INSTANCES'][redis_chassis] = data['INSTANCES'][redis_chassis]
-       if delete_config:
-           del data['INSTANCES'][redis_chassis]
+        data_keep['INSTANCES'] = {}
+        data_keep['INSTANCES'][redis_chassis] = data['INSTANCES'][redis_chassis]
+        if delete_config:
+            del data['INSTANCES'][redis_chassis]
     if 'DATABASES' in data and chassis_db in data['DATABASES']:
-       data_keep['DATABASES'] = {}
-       data_keep['DATABASES'][chassis_db] = data['DATABASES'][chassis_db]
-       if delete_config:
-           del data['DATABASES'][chassis_db]
+        data_keep['DATABASES'] = {}
+        data_keep['DATABASES'][chassis_db] = data['DATABASES'][chassis_db]
+        if delete_config:
+            del data['DATABASES'][chassis_db]
     with open(jsonfile, "w") as write_file:
-       data_publish = data_keep if keep_config else data
-       if port_number:
-           data_publish['INSTANCES']['redis_chassis']['port'] = int(port_number)
-       json.dump(data_publish, write_file, indent=4, separators=(',', ': '))
+        data_publish = data_keep if keep_config else data
+        if port_number:
+            data_publish['INSTANCES']['redis_chassis']['port'] = int(port_number)
+        json.dump(data_publish, write_file, indent=4, separators=(',', ': '))
         syslog.syslog(syslog.LOG_INFO,
-                     'remove chassis_db from config file {}'.format(jsonfile))
+                      'remove chassis_db from config file {}'.format(jsonfile))
 
 
 if __name__ == "__main__":
     main()