[Python] Align files in root dir, dockers/ and files/ with PEP8 standards (#6109)
**- Why I did it** Align style with slightly modified PEP8 standards (extending the maximum line length to 120 chars). This will also help in the transition to Python 3, which is stricter about whitespace, and it helps unify style across the SONiC codebase. Other directories will be tackled in separate PRs. **- How I did it** Using `autopep8 --in-place --max-line-length 120` and some manual tweaks.
This commit is contained in:
parent
0e101247ac
commit
905a5127bb
@ -65,5 +65,6 @@ def main():
|
||||
p.sendline('sync')
|
||||
p.expect([cmd_prompt])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
@ -6,6 +6,7 @@ import subprocess
|
||||
import syslog
|
||||
from swsssdk import ConfigDBConnector
|
||||
|
||||
|
||||
class BGPConfigDaemon:
|
||||
|
||||
def __init__(self):
|
||||
@ -15,17 +16,18 @@ class BGPConfigDaemon:
|
||||
self.bgp_neighbor = self.config_db.get_table('BGP_NEIGHBOR')
|
||||
|
||||
def __run_command(self, command):
    """Run *command* in a shell and log an error to syslog if it exits non-zero.

    Returns None; stdout is captured only so it can be included in the
    error log message on failure.
    """
    # NOTE(review): shell=True with an internally-built vtysh command string;
    # callers must never pass untrusted input here.
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    # communicate() reads all output and waits for the process to terminate,
    # so no separate wait() call is needed afterwards.
    stdout = p.communicate()[0]
    if p.returncode != 0:
        syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(
            p.returncode, command, stdout))
|
||||
|
||||
def metadata_handler(self, key, data):
|
||||
if key == 'localhost' and data.has_key('bgp_asn'):
|
||||
if data['bgp_asn'] != self.bgp_asn:
|
||||
syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(data['bgp_asn'], self.bgp_asn))
|
||||
syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(
|
||||
data['bgp_asn'], self.bgp_asn))
|
||||
self.__run_command("supervisorctl restart start.sh")
|
||||
self.__run_command("service quagga restart")
|
||||
self.bgp_asn = data['bgp_asn']
|
||||
@ -38,14 +40,17 @@ class BGPConfigDaemon:
|
||||
self.__run_command(command)
|
||||
self.bgp_neighbor.pop(key)
|
||||
else:
|
||||
command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(self.bgp_asn, key, data['asn'])
|
||||
command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(
|
||||
self.bgp_asn, key, data['asn'])
|
||||
self.__run_command(command)
|
||||
if data.has_key('name'):
|
||||
command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(self.bgp_asn, key, data['name'])
|
||||
command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(
|
||||
self.bgp_asn, key, data['name'])
|
||||
self.__run_command(command)
|
||||
if data.has_key('admin_status'):
|
||||
command_mod = 'no ' if data['admin_status'] == 'up' else ''
|
||||
command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(self.bgp_asn, command_mod, key)
|
||||
command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(
|
||||
self.bgp_asn, command_mod, key)
|
||||
self.__run_command(command)
|
||||
self.bgp_neighbor[key] = data
|
||||
|
||||
@ -61,5 +66,6 @@ def main():
|
||||
daemon = BGPConfigDaemon()
|
||||
daemon.start()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@ -83,7 +83,8 @@ class LldpManager(daemon_base.DaemonBase):
|
||||
(init_status, init_fvp) = port_table.get("PortInitDone")
|
||||
# The initialization procedure is done, but don't have this port entry
|
||||
if init_status:
|
||||
self.log_error("Port '{}' not found in {} table in App DB".format(port_name, swsscommon.APP_PORT_TABLE_NAME))
|
||||
self.log_error("Port '{}' not found in {} table in App DB".format(
|
||||
port_name, swsscommon.APP_PORT_TABLE_NAME))
|
||||
return False
|
||||
|
||||
def generate_pending_lldp_config_cmd_for_port(self, port_name):
|
||||
@ -110,7 +111,8 @@ class LldpManager(daemon_base.DaemonBase):
|
||||
port_desc = port_table_dict.get("description")
|
||||
|
||||
else:
|
||||
self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(port_name, swsscommon.CFG_PORT_TABLE_NAME))
|
||||
self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(
|
||||
port_name, swsscommon.CFG_PORT_TABLE_NAME))
|
||||
port_alias = port_name
|
||||
|
||||
lldpcli_cmd = "lldpcli configure ports {0} lldp portidsubtype local {1}".format(port_name, port_alias)
|
||||
@ -233,6 +235,7 @@ class LldpManager(daemon_base.DaemonBase):
|
||||
|
||||
# ============================= Functions =============================
|
||||
|
||||
|
||||
def main():
|
||||
# Instantiate a LldpManager object
|
||||
lldpmgr = LldpManager(SYSLOG_IDENTIFIER)
|
||||
@ -242,16 +245,19 @@ def main():
|
||||
|
||||
lldpmgr.run()
|
||||
|
||||
|
||||
def run_cmd(self, cmd):
    """Execute *cmd* in a shell and return a (return_code, stderr_text) pair.

    stdout is captured but discarded; universal_newlines=True makes both
    output streams text rather than bytes.
    """
    process = subprocess.Popen(cmd, shell=True, universal_newlines=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _out_text, err_text = process.communicate()
    return process.returncode, err_text
|
||||
|
||||
|
||||
def check_timeout(self, start_time):
    """Return True once more than PORT_INIT_TIMEOUT seconds have elapsed
    since *start_time* (logging an error), otherwise False.
    """
    elapsed = time.time() - start_time
    if elapsed <= PORT_INIT_TIMEOUT:
        return False
    self.log_error("Port init timeout reached ({} seconds), resuming lldpd...".format(PORT_INIT_TIMEOUT))
    return True
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@ -59,4 +59,3 @@ def main():
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
@ -14,6 +14,7 @@ SYSLOG_IDENTIFIER = 'port_index_mapper'
|
||||
logger = Logger(SYSLOG_IDENTIFIER)
|
||||
logger.set_min_log_priority_info()
|
||||
|
||||
|
||||
class PortIndexMapper(object):
|
||||
|
||||
def __init__(self):
|
||||
@ -106,6 +107,7 @@ def main():
|
||||
port_mapper.populate()
|
||||
port_mapper.listen()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
rc = 0
|
||||
try:
|
||||
|
@ -14,16 +14,19 @@ CONFIG_FILES = {
|
||||
|
||||
OUTPUT_FILE = os.path.abspath('./asic_config_checksum')
|
||||
|
||||
|
||||
def log_info(msg):
    """Emit *msg* to syslog at INFO priority.

    The log is opened and closed around each message so the
    SYSLOG_IDENTIFIER prefix is always in effect.
    """
    syslog.openlog(SYSLOG_IDENTIFIER)
    syslog.syslog(syslog.LOG_INFO, msg)
    syslog.closelog()
|
||||
|
||||
|
||||
def log_error(msg):
    """Emit *msg* to syslog at ERROR priority.

    Mirrors log_info(): open/close around each message so the
    SYSLOG_IDENTIFIER prefix is always in effect.
    """
    syslog.openlog(SYSLOG_IDENTIFIER)
    syslog.syslog(syslog.LOG_ERR, msg)
    syslog.closelog()
|
||||
|
||||
|
||||
def get_config_files(config_file_map):
|
||||
'''
|
||||
Generates a list of absolute paths to ASIC config files.
|
||||
@ -34,6 +37,7 @@ def get_config_files(config_file_map):
|
||||
config_files.append(os.path.join(path, config_file))
|
||||
return config_files
|
||||
|
||||
|
||||
def generate_checksum(checksum_files):
|
||||
'''
|
||||
Generates a checksum for a given list of files. Returns None if an error
|
||||
@ -54,6 +58,7 @@ def generate_checksum(checksum_files):
|
||||
|
||||
return checksum.hexdigest()
|
||||
|
||||
|
||||
def main():
|
||||
config_files = sorted(get_config_files(CONFIG_FILES))
|
||||
checksum = generate_checksum(config_files)
|
||||
@ -63,5 +68,6 @@ def main():
|
||||
with open(OUTPUT_FILE, 'w') as output:
|
||||
output.write(checksum + '\n')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
@ -167,7 +167,6 @@ class Handler(FileSystemEventHandler):
|
||||
Handler.wait_for_file_write_complete(event.src_path)
|
||||
Handler.handle_file(event.src_path)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def wait_for_file_write_complete(path):
|
||||
mtime = 0
|
||||
@ -188,7 +187,6 @@ class Handler(FileSystemEventHandler):
|
||||
|
||||
logger.log_debug("File write complete - " + path)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def handle_file(path):
|
||||
lpath = "/".join(cwd)
|
||||
@ -239,13 +237,13 @@ class Handler(FileSystemEventHandler):
|
||||
break
|
||||
|
||||
except Exception as ex:
|
||||
logger.log_error("core uploader failed: Failed during upload (" + coref + ") err: ("+ str(ex) +") retry:" + str(i))
|
||||
logger.log_error("core uploader failed: Failed during upload (" +
|
||||
coref + ") err: (" + str(ex) + ") retry:" + str(i))
|
||||
if not os.path.exists(fpath):
|
||||
break
|
||||
i += 1
|
||||
time.sleep(PAUSE_ON_FAIL)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def scan():
|
||||
for e in os.listdir(CORE_FILE_PATH):
|
||||
@ -262,4 +260,3 @@ if __name__ == '__main__':
|
||||
w.run()
|
||||
except Exception as e:
|
||||
logger.log_err("core uploader failed: " + str(e) + " Exiting ...")
|
||||
|
||||
|
@ -44,6 +44,7 @@ g_thread_exit_event = threading.Event()
|
||||
g_service = []
|
||||
g_dep_services = []
|
||||
|
||||
|
||||
def wait_for_container(docker_client, container_name):
|
||||
while True:
|
||||
while docker_client.inspect_container(container_name)['State']['Status'] != "running":
|
||||
@ -62,6 +63,7 @@ def wait_for_container(docker_client, container_name):
|
||||
# Signal the main thread to exit
|
||||
g_thread_exit_event.set()
|
||||
|
||||
|
||||
def main():
|
||||
thread_list = []
|
||||
|
||||
@ -102,5 +104,6 @@ Examples:
|
||||
g_thread_exit_event.wait()
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
@ -10,6 +10,7 @@ SYSLOG_IDENTIFIER = 'core_cleanup.py'
|
||||
CORE_FILE_DIR = '/var/core/'
|
||||
MAX_CORE_FILES = 4
|
||||
|
||||
|
||||
def main():
|
||||
logger = Logger(SYSLOG_IDENTIFIER)
|
||||
logger.set_min_log_priority_info()
|
||||
@ -39,5 +40,6 @@ def main():
|
||||
|
||||
logger.log_info('Finished cleaning up core files')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
@ -21,6 +21,8 @@ CRITICAL_PROCESSES_FILE = '/etc/supervisor/critical_processes'
|
||||
FEATURE_TABLE_NAME = 'FEATURE'
|
||||
|
||||
# Read the critical processes/group names from CRITICAL_PROCESSES_FILE
|
||||
|
||||
|
||||
def get_critical_group_and_process_list():
|
||||
critical_group_list = []
|
||||
critical_process_list = []
|
||||
@ -29,7 +31,8 @@ def get_critical_group_and_process_list():
|
||||
for line in file:
|
||||
line_info = line.strip(' \n').split(':')
|
||||
if len(line_info) != 2:
|
||||
syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
|
||||
syslog.syslog(syslog.LOG_ERR,
|
||||
"Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
|
||||
sys.exit(5)
|
||||
|
||||
identifier_key = line_info[0].strip()
|
||||
@ -39,11 +42,13 @@ def get_critical_group_and_process_list():
|
||||
elif identifier_key == "program" and identifier_value:
|
||||
critical_process_list.append(identifier_value)
|
||||
else:
|
||||
syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
|
||||
syslog.syslog(syslog.LOG_ERR,
|
||||
"Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
|
||||
sys.exit(6)
|
||||
|
||||
return critical_group_list, critical_process_list
|
||||
|
||||
|
||||
def main(argv):
|
||||
container_name = None
|
||||
opts, args = getopt.getopt(argv, "c:", ["container-name="])
|
||||
@ -90,7 +95,8 @@ def main(argv):
|
||||
|
||||
restart_feature = features_table[container_name].get('auto_restart')
|
||||
if not restart_feature:
|
||||
syslog.syslog(syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
|
||||
syslog.syslog(
|
||||
syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
|
||||
sys.exit(4)
|
||||
|
||||
# If auto-restart feature is not disabled and at the same time
|
||||
|
@ -11,8 +11,7 @@ chassis_db = 'CHASSIS_APP_DB'
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description=
|
||||
"Update chassis_db config from database-config.json")
|
||||
parser = argparse.ArgumentParser(description="Update chassis_db config from database-config.json")
|
||||
parser.add_argument("-j", "--json", help="databse-config json file", nargs='?',
|
||||
const=database_config_file)
|
||||
parser.add_argument("-p", "--port", help="update port number", nargs='?')
|
||||
|
Loading…
Reference in New Issue
Block a user