[Python] Align files in root dir, dockers/ and files/ with PEP8 standards (#6109)

**- Why I did it**

Align style with slightly modified PEP8 standards (maximum line length extended to 120 chars). This will also help in the transition to Python 3, which is stricter about whitespace, and it helps unify style across the SONiC codebase. Other directories will be tackled in separate PRs.
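
As a hypothetical illustration of that strictness (not code from this repo): Python 2 silently treated a tab as eight spaces, while Python 3 refuses to parse indentation that mixes the two:

```python
def f():
	x = 1          # this line is indented with a tab
        return x       # this line with spaces; Python 3 raises
                       # "TabError: inconsistent use of tabs and spaces in indentation"
```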

**- How I did it**

Ran `autopep8 --in-place --max-line-length 120` over the affected files, plus some manual tweaks.
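
For example, an over-long call like the `syslog.syslog(...)` lines in the diff below gets wrapped at the argument list; a sketch of the before/after (the "after" mirrors what appears in this commit, not captured autopep8 output verbatim):

```python
# Before: roughly 150 characters on one line
syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(p.returncode, command, stdout))

# After autopep8 --max-line-length 120: the argument list is wrapped
syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(
    p.returncode, command, stdout))
```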
Joe LeVeque authored 2020-12-03 15:57:50 -08:00, committed by GitHub
commit 905a5127bb (parent 0e101247ac)
12 changed files with 102 additions and 75 deletions

View File

@@ -65,5 +65,6 @@ def main():
     p.sendline('sync')
     p.expect([cmd_prompt])
 
+
 if __name__ == '__main__':
     main()

View File

@@ -6,6 +6,7 @@ import subprocess
 import syslog
 from swsssdk import ConfigDBConnector
 
+
 class BGPConfigDaemon:
 
     def __init__(self):
@@ -15,17 +16,18 @@ class BGPConfigDaemon:
         self.bgp_neighbor = self.config_db.get_table('BGP_NEIGHBOR')
 
     def __run_command(self, command):
-        # print command
         p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
         stdout = p.communicate()[0]
         p.wait()
         if p.returncode != 0:
-            syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(p.returncode, command, stdout))
+            syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(
+                p.returncode, command, stdout))
 
     def metadata_handler(self, key, data):
         if key == 'localhost' and data.has_key('bgp_asn'):
             if data['bgp_asn'] != self.bgp_asn:
-                syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(data['bgp_asn'], self.bgp_asn))
+                syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(
+                    data['bgp_asn'], self.bgp_asn))
                 self.__run_command("supervisorctl restart start.sh")
                 self.__run_command("service quagga restart")
                 self.bgp_asn = data['bgp_asn']
@@ -38,14 +40,17 @@ class BGPConfigDaemon:
             self.__run_command(command)
             self.bgp_neighbor.pop(key)
         else:
-            command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(self.bgp_asn, key, data['asn'])
+            command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(
+                self.bgp_asn, key, data['asn'])
             self.__run_command(command)
             if data.has_key('name'):
-                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(self.bgp_asn, key, data['name'])
+                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(
+                    self.bgp_asn, key, data['name'])
                 self.__run_command(command)
             if data.has_key('admin_status'):
                 command_mod = 'no ' if data['admin_status'] == 'up' else ''
-                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(self.bgp_asn, command_mod, key)
+                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(
+                    self.bgp_asn, command_mod, key)
                 self.__run_command(command)
             self.bgp_neighbor[key] = data
@@ -61,5 +66,6 @@ def main():
     daemon = BGPConfigDaemon()
     daemon.start()
 
+
 if __name__ == "__main__":
     main()
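
(Aside: the `data.has_key(...)` calls above are deliberately untouched here; `dict.has_key()` was removed in Python 3, so the transition mentioned in the description will eventually need the `in` operator. A minimal sketch of the portable form:

```python
# Python 2 only; dict.has_key() no longer exists in Python 3:
if data.has_key('bgp_asn'):
    pass  # handle ASN change

# Portable equivalent that works under both interpreters:
if 'bgp_asn' in data:
    pass  # handle ASN change
```
)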

View File

@@ -81,9 +81,10 @@ class LldpManager(daemon_base.DaemonBase):
             else:
                 # Retrieve PortInitDone entry from the Port table
                 (init_status, init_fvp) = port_table.get("PortInitDone")
-                #The initialization procedure is done, but don't have this port entry
+                # The initialization procedure is done, but don't have this port entry
                 if init_status:
-                    self.log_error("Port '{}' not found in {} table in App DB".format(port_name, swsscommon.APP_PORT_TABLE_NAME))
+                    self.log_error("Port '{}' not found in {} table in App DB".format(
+                        port_name, swsscommon.APP_PORT_TABLE_NAME))
             return False
 
     def generate_pending_lldp_config_cmd_for_port(self, port_name):
@@ -110,7 +111,8 @@ class LldpManager(daemon_base.DaemonBase):
             port_desc = port_table_dict.get("description")
         else:
-            self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(port_name, swsscommon.CFG_PORT_TABLE_NAME))
+            self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(
+                port_name, swsscommon.CFG_PORT_TABLE_NAME))
             port_alias = port_name
 
         lldpcli_cmd = "lldpcli configure ports {0} lldp portidsubtype local {1}".format(port_name, port_alias)
@@ -233,6 +235,7 @@ class LldpManager(daemon_base.DaemonBase):
 
 # ============================= Functions =============================
 
+
 def main():
     # Instantiate a LldpManager object
     lldpmgr = LldpManager(SYSLOG_IDENTIFIER)
@@ -242,16 +245,19 @@ def main():
     lldpmgr.run()
 
+
 def run_cmd(self, cmd):
     proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     (stdout, stderr) = proc.communicate()
     return proc.returncode, stderr
 
+
 def check_timeout(self, start_time):
     if time.time() - start_time > PORT_INIT_TIMEOUT:
         self.log_error("Port init timeout reached ({} seconds), resuming lldpd...".format(PORT_INIT_TIMEOUT))
         return True
     return False
 
+
 if __name__ == "__main__":
     main()

View File

@@ -59,4 +59,3 @@ def main():
 
 if __name__ == '__main__':
     main()
-

View File

@@ -14,6 +14,7 @@ SYSLOG_IDENTIFIER = 'port_index_mapper'
 logger = Logger(SYSLOG_IDENTIFIER)
 logger.set_min_log_priority_info()
 
+
 class PortIndexMapper(object):
 
     def __init__(self):
@@ -106,6 +107,7 @@ def main():
     port_mapper.populate()
     port_mapper.listen()
 
+
 if __name__ == '__main__':
     rc = 0
     try:

View File

@@ -14,16 +14,19 @@ CONFIG_FILES = {
 OUTPUT_FILE = os.path.abspath('./asic_config_checksum')
 
+
 def log_info(msg):
     syslog.openlog(SYSLOG_IDENTIFIER)
     syslog.syslog(syslog.LOG_INFO, msg)
     syslog.closelog()
 
+
 def log_error(msg):
     syslog.openlog(SYSLOG_IDENTIFIER)
     syslog.syslog(syslog.LOG_ERR, msg)
     syslog.closelog()
 
+
 def get_config_files(config_file_map):
     '''
     Generates a list of absolute paths to ASIC config files.
@@ -34,6 +37,7 @@ def get_config_files(config_file_map):
             config_files.append(os.path.join(path, config_file))
     return config_files
 
+
 def generate_checksum(checksum_files):
     '''
     Generates a checksum for a given list of files. Returns None if an error
@@ -54,6 +58,7 @@ def generate_checksum(checksum_files):
     return checksum.hexdigest()
 
+
 def main():
     config_files = sorted(get_config_files(CONFIG_FILES))
     checksum = generate_checksum(config_files)
@@ -63,5 +68,6 @@ def main():
     with open(OUTPUT_FILE, 'w') as output:
         output.write(checksum + '\n')
 
+
 if __name__ == '__main__':
     main()

View File

@@ -31,7 +31,7 @@ cwd = []
 HOURS_4 = (4 * 60 * 60)
 PAUSE_ON_FAIL = (60 * 60)
 WAIT_FILE_WRITE1 = (10 * 60)
-WAIT_FILE_WRITE2= (5 * 60)
+WAIT_FILE_WRITE2 = (5 * 60)
 POLL_SLEEP = (60 * 60)
 MAX_RETRIES = 5
 UPLOAD_PREFIX = "UPLOADED_"
@@ -62,7 +62,7 @@ class config:
         while not os.path.exists(RC_FILE):
             # Wait here until service restart
             logger.log_error("Unable to retrieve Azure storage credentials")
-            time.sleep (HOURS_4)
+            time.sleep(HOURS_4)
 
         with open(RC_FILE, 'r') as f:
             self.parsed_data = json.load(f)
@@ -137,7 +137,7 @@ class Handler(FileSystemEventHandler):
             while True:
                 # Wait here until service restart
                 logger.log_error("Unable to retrieve Azure storage credentials")
-                time.sleep (HOURS_4)
+                time.sleep(HOURS_4)
 
         with open("/etc/sonic/sonic_version.yml", 'r') as stream:
             l = yaml.safe_load(stream)
@@ -167,7 +167,6 @@ class Handler(FileSystemEventHandler):
             Handler.wait_for_file_write_complete(event.src_path)
             Handler.handle_file(event.src_path)
 
-
     @staticmethod
     def wait_for_file_write_complete(path):
         mtime = 0
@@ -188,7 +187,6 @@ class Handler(FileSystemEventHandler):
         logger.log_debug("File write complete - " + path)
 
-
     @staticmethod
     def handle_file(path):
         lpath = "/".join(cwd)
@@ -239,13 +237,13 @@ class Handler(FileSystemEventHandler):
                 break
             except Exception as ex:
-                logger.log_error("core uploader failed: Failed during upload (" + coref + ") err: ("+ str(ex) +") retry:" + str(i))
+                logger.log_error("core uploader failed: Failed during upload (" +
+                                 coref + ") err: (" + str(ex) + ") retry:" + str(i))
                 if not os.path.exists(fpath):
                     break
                 i += 1
                 time.sleep(PAUSE_ON_FAIL)
 
-
     @staticmethod
     def scan():
         for e in os.listdir(CORE_FILE_PATH):
@@ -262,4 +260,3 @@ if __name__ == '__main__':
         w.run()
     except Exception as e:
         logger.log_err("core uploader failed: " + str(e) + " Exiting ...")
-

View File

@@ -44,6 +44,7 @@ g_thread_exit_event = threading.Event()
 g_service = []
 g_dep_services = []
 
+
 def wait_for_container(docker_client, container_name):
     while True:
         while docker_client.inspect_container(container_name)['State']['Status'] != "running":
@@ -62,6 +63,7 @@ def wait_for_container(docker_client, container_name):
         # Signal the main thread to exit
         g_thread_exit_event.set()
 
+
 def main():
     thread_list = []
@@ -74,8 +76,8 @@ Examples:
     docker-wait-any -s swss -d syncd teamd
 """)
 
-    parser.add_argument('-s','--service', nargs='+', default=None, help='name of the service')
-    parser.add_argument('-d','--dependent', nargs='*', default=None, help='other dependent services')
+    parser.add_argument('-s', '--service', nargs='+', default=None, help='name of the service')
+    parser.add_argument('-d', '--dependent', nargs='*', default=None, help='other dependent services')
     args = parser.parse_args()
 
     global g_service
@@ -102,5 +104,6 @@ def main():
     g_thread_exit_event.wait()
     sys.exit(0)
 
+
 if __name__ == '__main__':
     main()

View File

@@ -10,6 +10,7 @@ SYSLOG_IDENTIFIER = 'core_cleanup.py'
 CORE_FILE_DIR = '/var/core/'
 MAX_CORE_FILES = 4
 
+
 def main():
     logger = Logger(SYSLOG_IDENTIFIER)
     logger.set_min_log_priority_info()
@@ -28,7 +29,7 @@ def main():
             curr_files.append(f)
 
         if len(curr_files) > MAX_CORE_FILES:
-            curr_files.sort(reverse = True, key = lambda x: datetime.utcfromtimestamp(int(x.split('.')[1])))
+            curr_files.sort(reverse=True, key=lambda x: datetime.utcfromtimestamp(int(x.split('.')[1])))
             oldest_core = curr_files[MAX_CORE_FILES]
             logger.log_info('Deleting {}'.format(oldest_core))
             try:
@@ -39,5 +40,6 @@ def main():
     logger.log_info('Finished cleaning up core files')
 
+
 if __name__ == '__main__':
     main()

View File

@@ -21,6 +21,8 @@ CRITICAL_PROCESSES_FILE = '/etc/supervisor/critical_processes'
 FEATURE_TABLE_NAME = 'FEATURE'
+
+
 # Read the critical processes/group names from CRITICAL_PROCESSES_FILE
 def get_critical_group_and_process_list():
     critical_group_list = []
     critical_process_list = []
@@ -29,7 +31,8 @@ def get_critical_group_and_process_list():
         for line in file:
             line_info = line.strip(' \n').split(':')
             if len(line_info) != 2:
-                syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
+                syslog.syslog(syslog.LOG_ERR,
+                              "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
                 sys.exit(5)
 
             identifier_key = line_info[0].strip()
@@ -39,11 +42,13 @@ def get_critical_group_and_process_list():
             elif identifier_key == "program" and identifier_value:
                 critical_process_list.append(identifier_value)
             else:
-                syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
+                syslog.syslog(syslog.LOG_ERR,
+                              "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
                 sys.exit(6)
 
     return critical_group_list, critical_process_list
 
+
 def main(argv):
     container_name = None
     opts, args = getopt.getopt(argv, "c:", ["container-name="])
@@ -90,7 +95,8 @@ def main(argv):
         restart_feature = features_table[container_name].get('auto_restart')
         if not restart_feature:
-            syslog.syslog(syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
+            syslog.syslog(
+                syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
             sys.exit(4)
 
         # If auto-restart feature is not disabled and at the same time

View File

@@ -11,14 +11,13 @@ chassis_db = 'CHASSIS_APP_DB'
 
 def main():
-    parser = argparse.ArgumentParser(description=
-                                     "Update chassis_db config from database-config.json")
+    parser = argparse.ArgumentParser(description="Update chassis_db config from database-config.json")
     parser.add_argument("-j", "--json", help="databse-config json file", nargs='?',
                         const=database_config_file)
-    parser.add_argument("-p", "--port", help="update port number", nargs='?' )
+    parser.add_argument("-p", "--port", help="update port number", nargs='?')
     group = parser.add_mutually_exclusive_group()
-    group.add_argument("-k", "--keep", help="keep configuration", action='store_true' )
-    group.add_argument("-d", "--delete", help="delete configuration", action='store_true' )
+    group.add_argument("-k", "--keep", help="keep configuration", action='store_true')
+    group.add_argument("-d", "--delete", help="delete configuration", action='store_true')
     args = parser.parse_args()
 
     jsonfile = ""