[Python] Align files in root dir, dockers/ and files/ with PEP8 standards (#6109)
**- Why I did it**
Align style with slightly modified PEP8 standards (maximum line length extended to 120 chars). This will also ease the transition to Python 3, which is stricter about whitespace, and it helps unify style across the SONiC codebase. Other directories will be tackled in separate PRs.

**- How I did it**
Using `autopep8 --in-place --max-line-length 120` and some manual tweaks.
parent 0e101247ac
commit 905a5127bb
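For reference, a minimal sketch of the kind of invocation described above. The `find` pipeline and the `pycodestyle` verification step are illustrative assumptions, not part of this commit; several of the touched scripts carry no `.py` extension, so in practice files may need to be passed to autopep8 explicitly.

```sh
# Reformat Python sources in place, allowing lines up to 120 characters
# (illustrative; the commit targets the repo root, dockers/ and files/).
find . -name '*.py' -print0 | xargs -0 autopep8 --in-place --max-line-length 120

# Optionally re-check the result against the same relaxed limit.
find . -name '*.py' -print0 | xargs -0 pycodestyle --max-line-length 120
```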
@@ -65,5 +65,6 @@ def main():
     p.sendline('sync')
     p.expect([cmd_prompt])
 
+
 if __name__ == '__main__':
     main()
@@ -6,6 +6,7 @@ import subprocess
 import syslog
 from swsssdk import ConfigDBConnector
 
+
 class BGPConfigDaemon:
 
     def __init__(self):
@@ -15,17 +16,18 @@ class BGPConfigDaemon:
         self.bgp_neighbor = self.config_db.get_table('BGP_NEIGHBOR')
 
     def __run_command(self, command):
-        # print command
         p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
         stdout = p.communicate()[0]
         p.wait()
         if p.returncode != 0:
-            syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(p.returncode, command, stdout))
+            syslog.syslog(syslog.LOG_ERR, '[bgp cfgd] command execution returned {}. Command: "{}", stdout: "{}"'.format(
+                p.returncode, command, stdout))
 
     def metadata_handler(self, key, data):
         if key == 'localhost' and data.has_key('bgp_asn'):
             if data['bgp_asn'] != self.bgp_asn:
-                syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(data['bgp_asn'], self.bgp_asn))
+                syslog.syslog(syslog.LOG_INFO, '[bgp cfgd] ASN changed to {} from {}, restart BGP...'.format(
+                    data['bgp_asn'], self.bgp_asn))
                 self.__run_command("supervisorctl restart start.sh")
                 self.__run_command("service quagga restart")
                 self.bgp_asn = data['bgp_asn']
@@ -38,22 +40,25 @@ class BGPConfigDaemon:
                 self.__run_command(command)
                 self.bgp_neighbor.pop(key)
             else:
-                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(self.bgp_asn, key, data['asn'])
+                command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} remote-as {}'".format(
+                    self.bgp_asn, key, data['asn'])
                 self.__run_command(command)
                 if data.has_key('name'):
-                    command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(self.bgp_asn, key, data['name'])
+                    command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c 'neighbor {} description {}'".format(
+                        self.bgp_asn, key, data['name'])
                     self.__run_command(command)
                 if data.has_key('admin_status'):
                     command_mod = 'no ' if data['admin_status'] == 'up' else ''
-                    command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(self.bgp_asn, command_mod, key)
+                    command = "vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}neighbor {} shutdown'".format(
+                        self.bgp_asn, command_mod, key)
                     self.__run_command(command)
                 self.bgp_neighbor[key] = data
 
     def start(self):
         self.config_db.subscribe('BGP_NEIGHBOR',
                                  lambda table, key, data: self.bgp_handler(key, data))
         self.config_db.subscribe('DEVICE_METADATA',
                                  lambda table, key, data: self.metadata_handler(key, data))
         self.config_db.listen()
 
 
@@ -61,5 +66,6 @@ def main():
     daemon = BGPConfigDaemon()
     daemon.start()
 
+
 if __name__ == "__main__":
     main()
@@ -81,9 +81,10 @@ class LldpManager(daemon_base.DaemonBase):
         else:
             # Retrieve PortInitDone entry from the Port table
             (init_status, init_fvp) = port_table.get("PortInitDone")
-            #The initialization procedure is done, but don't have this port entry
+            # The initialization procedure is done, but don't have this port entry
             if init_status:
-                self.log_error("Port '{}' not found in {} table in App DB".format(port_name, swsscommon.APP_PORT_TABLE_NAME))
+                self.log_error("Port '{}' not found in {} table in App DB".format(
+                    port_name, swsscommon.APP_PORT_TABLE_NAME))
                 return False
 
     def generate_pending_lldp_config_cmd_for_port(self, port_name):
@@ -105,12 +106,13 @@ class LldpManager(daemon_base.DaemonBase):
             if not port_alias:
                 self.log_info("Unable to retrieve port alias for port '{}'. Using port name instead.".format(port_name))
                 port_alias = port_name
 
             # Get the port description. If None or empty string, we'll skip this configuration
             port_desc = port_table_dict.get("description")
 
         else:
-            self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(port_name, swsscommon.CFG_PORT_TABLE_NAME))
+            self.log_error("Port '{}' not found in {} table in Config DB. Using port name instead of port alias.".format(
+                port_name, swsscommon.CFG_PORT_TABLE_NAME))
             port_alias = port_name
 
         lldpcli_cmd = "lldpcli configure ports {0} lldp portidsubtype local {1}".format(port_name, port_alias)
@@ -233,6 +235,7 @@ class LldpManager(daemon_base.DaemonBase):
 
 # ============================= Functions =============================
 
+
 def main():
     # Instantiate a LldpManager object
     lldpmgr = LldpManager(SYSLOG_IDENTIFIER)
@@ -242,16 +245,19 @@ def main():
 
     lldpmgr.run()
 
+
 def run_cmd(self, cmd):
     proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     (stdout, stderr) = proc.communicate()
     return proc.returncode, stderr
 
+
 def check_timeout(self, start_time):
     if time.time() - start_time > PORT_INIT_TIMEOUT:
         self.log_error("Port init timeout reached ({} seconds), resuming lldpd...".format(PORT_INIT_TIMEOUT))
         return True
     return False
 
+
 if __name__ == "__main__":
     main()
@@ -23,7 +23,7 @@ SYSLOG_IDENTIFIER = os.path.basename(__file__)
 
 WARM_BOOT_FILE_DIR = '/var/warmboot/nat/'
 NAT_WARM_BOOT_FILE = 'nat_entries.dump'
 IP_PROTO_TCP = '6'
 
 MATCH_CONNTRACK_ENTRY = '^(\w+)\s+(\d+).*src=([\d.]+)\s+dst=([\d.]+)\s+sport=(\d+)\s+dport=(\d+).*src=([\d.]+)\s+dst=([\d.]+)\s+sport=(\d+)\s+dport=(\d+)'
 
@@ -38,8 +38,8 @@ def add_nat_conntrack_entry_in_kernel(ipproto, srcip, dstip, srcport, dstport, n
     if (ipproto == IP_PROTO_TCP):
         state = ' --state ESTABLISHED '
     ctcmd = 'conntrack -I -n ' + natdstip + ':' + natdstport + ' -g ' + natsrcip + ':' + natsrcport + \
         ' --protonum ' + ipproto + state + ' --timeout 432000 --src ' + srcip + ' --sport ' + srcport + \
         ' --dst ' + dstip + ' --dport ' + dstport + ' -u ASSURED'
     subprocess.call(ctcmd, shell=True)
     logger.log_info("Restored NAT entry: {}".format(ctcmd))
 
@@ -65,7 +65,7 @@ def restore_update_kernel_nat_entries(filename):
             cmdargs = list(ctline.pop(0))
             proto = cmdargs.pop(0)
             if proto not in ('tcp', 'udp'):
                 continue
             add_nat_conntrack_entry_in_kernel(*cmdargs)
 
 
@@ -97,7 +97,7 @@ def main():
         sys.exit(1)
 
     # Remove the dump file after restoration
     os.remove(WARM_BOOT_FILE_DIR + NAT_WARM_BOOT_FILE)
 
     # set statedb to signal other processes like natsyncd
     set_statedb_nat_restore_done()
@@ -4,7 +4,7 @@ import time
 import swsssdk
 
 # ALPHA defines the size of the window over which we calculate the average value. ALPHA is 2/(N+1) where N is the interval(window size)
 # In this case we configure the window to be 10s. This way if we have a huge 1s spike in traffic,
 # the average rate value will show a curve descending from the spike to the usual rate over approximately 10s.
 DEFAULT_SMOOTH_INTERVAL = '10'
 DEFAULT_ALPHA = '0.18'
@@ -17,7 +17,7 @@ def enable_counter_group(db, name):
 
 
 def enable_rates():
     # set the default interval for rates
     counters_db = swsssdk.SonicV2Connector()
     counters_db.connect('COUNTERS_DB')
     counters_db.set('COUNTERS_DB', 'RATES:PORT', 'PORT_SMOOTH_INTERVAL', DEFAULT_SMOOTH_INTERVAL)
@@ -59,4 +59,3 @@ def main():
 
 if __name__ == '__main__':
     main()
-
@@ -14,6 +14,7 @@ SYSLOG_IDENTIFIER = 'port_index_mapper'
 logger = Logger(SYSLOG_IDENTIFIER)
 logger.set_min_log_priority_info()
 
+
 class PortIndexMapper(object):
 
     def __init__(self):
@@ -106,6 +107,7 @@ def main():
     port_mapper.populate()
     port_mapper.listen()
 
+
 if __name__ == '__main__':
     rc = 0
     try:
@@ -14,16 +14,19 @@ CONFIG_FILES = {
 
 OUTPUT_FILE = os.path.abspath('./asic_config_checksum')
 
+
 def log_info(msg):
     syslog.openlog(SYSLOG_IDENTIFIER)
     syslog.syslog(syslog.LOG_INFO, msg)
     syslog.closelog()
 
+
 def log_error(msg):
     syslog.openlog(SYSLOG_IDENTIFIER)
     syslog.syslog(syslog.LOG_ERR, msg)
     syslog.closelog()
 
+
 def get_config_files(config_file_map):
     '''
     Generates a list of absolute paths to ASIC config files.
@@ -34,11 +37,12 @@ def get_config_files(config_file_map):
             config_files.append(os.path.join(path, config_file))
     return config_files
 
+
 def generate_checksum(checksum_files):
     '''
     Generates a checksum for a given list of files. Returns None if an error
     occurs while reading the files.
 
     NOTE: The checksum is performed in the order provided. This function does
     NOT do any re-ordering of the files before creating the checksum.
     '''
@@ -54,6 +58,7 @@ def generate_checksum(checksum_files):
 
     return checksum.hexdigest()
 
+
 def main():
     config_files = sorted(get_config_files(CONFIG_FILES))
     checksum = generate_checksum(config_files)
@@ -63,5 +68,6 @@ def main():
     with open(OUTPUT_FILE, 'w') as output:
         output.write(checksum + '\n')
 
+
 if __name__ == '__main__':
     main()
@@ -31,7 +31,7 @@ cwd = []
 HOURS_4 = (4 * 60 * 60)
 PAUSE_ON_FAIL = (60 * 60)
 WAIT_FILE_WRITE1 = (10 * 60)
-WAIT_FILE_WRITE2= (5 * 60)
+WAIT_FILE_WRITE2 = (5 * 60)
 POLL_SLEEP = (60 * 60)
 MAX_RETRIES = 5
 UPLOAD_PREFIX = "UPLOADED_"
@@ -62,7 +62,7 @@ class config:
         while not os.path.exists(RC_FILE):
             # Wait here until service restart
             logger.log_error("Unable to retrieve Azure storage credentials")
-            time.sleep (HOURS_4)
+            time.sleep(HOURS_4)
 
         with open(RC_FILE, 'r') as f:
             self.parsed_data = json.load(f)
@@ -137,7 +137,7 @@ class Handler(FileSystemEventHandler):
         while True:
             # Wait here until service restart
             logger.log_error("Unable to retrieve Azure storage credentials")
-            time.sleep (HOURS_4)
+            time.sleep(HOURS_4)
 
         with open("/etc/sonic/sonic_version.yml", 'r') as stream:
             l = yaml.safe_load(stream)
@@ -163,11 +163,10 @@ class Handler(FileSystemEventHandler):
 
         elif event.event_type == 'created':
             # Take any action here when a file is first created.
             logger.log_debug("Received create event - " + event.src_path)
             Handler.wait_for_file_write_complete(event.src_path)
             Handler.handle_file(event.src_path)
 
-
     @staticmethod
     def wait_for_file_write_complete(path):
         mtime = 0
@@ -186,8 +185,7 @@ class Handler(FileSystemEventHandler):
                 raise Exception("Dump file creation is too slow: " + path)
                 # Give up as something is terribly wrong with this file.
-
         logger.log_debug("File write complete - " + path)
 
 
     @staticmethod
     def handle_file(path):
@@ -202,7 +200,7 @@ class Handler(FileSystemEventHandler):
         tarf_name = fname + ".tar.gz"
 
         cfg.get_core_info(path, hostname)
 
         tar = tarfile.open(tarf_name, "w:gz")
         for e in metafiles:
             tar.add(metafiles[e])
@@ -212,14 +210,14 @@ class Handler(FileSystemEventHandler):
 
         Handler.upload_file(tarf_name, tarf_name, path)
 
         logger.log_debug("File uploaded - " + path)
         os.chdir(INIT_CWD)
 
     @staticmethod
     def upload_file(fname, fpath, coref):
         daemonname = fname.split(".")[0]
         i = 0
 
         while True:
             try:
                 svc = FileService(account_name=acctname, account_key=acctkey)
@@ -239,13 +237,13 @@ class Handler(FileSystemEventHandler):
                 break
 
             except Exception as ex:
-                logger.log_error("core uploader failed: Failed during upload (" + coref + ") err: ("+ str(ex) +") retry:" + str(i))
+                logger.log_error("core uploader failed: Failed during upload (" +
+                                 coref + ") err: (" + str(ex) + ") retry:" + str(i))
                 if not os.path.exists(fpath):
                     break
                 i += 1
                 time.sleep(PAUSE_ON_FAIL)
 
-
     @staticmethod
     def scan():
         for e in os.listdir(CORE_FILE_PATH):
@@ -262,4 +260,3 @@ if __name__ == '__main__':
         w.run()
     except Exception as e:
         logger.log_err("core uploader failed: " + str(e) + " Exiting ...")
-
@@ -44,6 +44,7 @@ g_thread_exit_event = threading.Event()
 g_service = []
 g_dep_services = []
 
+
 def wait_for_container(docker_client, container_name):
     while True:
         while docker_client.inspect_container(container_name)['State']['Status'] != "running":
@@ -56,26 +57,27 @@ def wait_for_container(docker_client, container_name):
         # If this is a dependent service and warm restart is enabled for the system/container,
         # OR if the system is going through a fast-reboot, DON'T signal main thread to exit
         if (container_name in g_dep_services and
                 (device_info.is_warm_restart_enabled(container_name) or device_info.is_fast_reboot_enabled())):
             continue
 
         # Signal the main thread to exit
         g_thread_exit_event.set()
 
+
 def main():
     thread_list = []
 
     docker_client = APIClient(base_url='unix://var/run/docker.sock')
 
     parser = argparse.ArgumentParser(description='Wait for dependent docker services',
                                      formatter_class=argparse.RawTextHelpFormatter,
                                      epilog="""
Examples:
    docker-wait-any -s swss -d syncd teamd
""")
 
-    parser.add_argument('-s','--service', nargs='+', default=None, help='name of the service')
-    parser.add_argument('-d','--dependent', nargs='*', default=None, help='other dependent services')
+    parser.add_argument('-s', '--service', nargs='+', default=None, help='name of the service')
+    parser.add_argument('-d', '--dependent', nargs='*', default=None, help='other dependent services')
     args = parser.parse_args()
 
     global g_service
@@ -102,5 +104,6 @@ Examples:
     g_thread_exit_event.wait()
     sys.exit(0)
 
+
 if __name__ == '__main__':
     main()
@@ -10,6 +10,7 @@ SYSLOG_IDENTIFIER = 'core_cleanup.py'
 CORE_FILE_DIR = '/var/core/'
 MAX_CORE_FILES = 4
 
+
 def main():
     logger = Logger(SYSLOG_IDENTIFIER)
     logger.set_min_log_priority_info()
@@ -28,7 +29,7 @@ def main():
             curr_files.append(f)
 
     if len(curr_files) > MAX_CORE_FILES:
-        curr_files.sort(reverse = True, key = lambda x: datetime.utcfromtimestamp(int(x.split('.')[1])))
+        curr_files.sort(reverse=True, key=lambda x: datetime.utcfromtimestamp(int(x.split('.')[1])))
         oldest_core = curr_files[MAX_CORE_FILES]
         logger.log_info('Deleting {}'.format(oldest_core))
         try:
@@ -39,5 +40,6 @@ def main():
 
     logger.log_info('Finished cleaning up core files')
 
+
 if __name__ == '__main__':
     main()
@@ -21,6 +21,8 @@ CRITICAL_PROCESSES_FILE = '/etc/supervisor/critical_processes'
 FEATURE_TABLE_NAME = 'FEATURE'
 
 # Read the critical processes/group names from CRITICAL_PROCESSES_FILE
+
+
 def get_critical_group_and_process_list():
     critical_group_list = []
     critical_process_list = []
@@ -29,7 +31,8 @@ def get_critical_group_and_process_list():
         for line in file:
             line_info = line.strip(' \n').split(':')
             if len(line_info) != 2:
-                syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
+                syslog.syslog(syslog.LOG_ERR,
+                              "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
                 sys.exit(5)
 
             identifier_key = line_info[0].strip()
@@ -39,11 +42,13 @@ def get_critical_group_and_process_list():
             elif identifier_key == "program" and identifier_value:
                 critical_process_list.append(identifier_value)
             else:
-                syslog.syslog(syslog.LOG_ERR, "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
+                syslog.syslog(syslog.LOG_ERR,
+                              "Syntax of the line {} in critical_processes file is incorrect. Exiting...".format(line))
                 sys.exit(6)
 
     return critical_group_list, critical_process_list
 
+
 def main(argv):
     container_name = None
     opts, args = getopt.getopt(argv, "c:", ["container-name="])
@@ -90,13 +95,14 @@ def main(argv):
 
     restart_feature = features_table[container_name].get('auto_restart')
     if not restart_feature:
-        syslog.syslog(syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
+        syslog.syslog(
+            syslog.LOG_ERR, "Unable to determine auto-restart feature status for '{}'. Exiting...".format(container_name))
         sys.exit(4)
 
     # If auto-restart feature is not disabled and at the same time
     # a critical process exited unexpectedly, terminate supervisor
     if (restart_feature != 'disabled' and expected == 0 and
             (processname in critical_process_list or groupname in critical_group_list)):
         MSG_FORMAT_STR = "Process {} exited unxepectedly. Terminating supervisor..."
         msg = MSG_FORMAT_STR.format(payload_headers['processname'])
         syslog.syslog(syslog.LOG_INFO, msg)
@@ -11,59 +11,58 @@ chassis_db = 'CHASSIS_APP_DB'
 
 
 def main():
-    parser = argparse.ArgumentParser(description=
-                                     "Update chassis_db config from database-config.json")
+    parser = argparse.ArgumentParser(description="Update chassis_db config from database-config.json")
     parser.add_argument("-j", "--json", help="databse-config json file", nargs='?',
                         const=database_config_file)
-    parser.add_argument("-p", "--port", help="update port number", nargs='?' )
+    parser.add_argument("-p", "--port", help="update port number", nargs='?')
     group = parser.add_mutually_exclusive_group()
-    group.add_argument("-k", "--keep", help="keep configuration", action='store_true' )
-    group.add_argument("-d", "--delete", help="delete configuration", action='store_true' )
+    group.add_argument("-k", "--keep", help="keep configuration", action='store_true')
+    group.add_argument("-d", "--delete", help="delete configuration", action='store_true')
 
     args = parser.parse_args()
     jsonfile = ""
     if args.json != None:
         jsonfile = args.json
     else:
         return
     if args.port != None:
         port_number = args.port
     else:
         port_number = ""
     if args.keep:
         keep_config = True
     else:
         keep_config = False
     if args.delete:
         delete_config = True
     else:
         delete_config = False
     data = {}
     data_keep = {}
     if os.path.isfile(jsonfile):
         with open(jsonfile, "r") as read_file:
             data = json.load(read_file)
     else:
         syslog.syslog(syslog.LOG_ERR,
                       'config file {} does notexist'.format(jsonfile))
         return
     if 'INSTANCES' in data and redis_chassis in data['INSTANCES']:
         data_keep['INSTANCES'] = {}
         data_keep['INSTANCES'][redis_chassis] = data['INSTANCES'][redis_chassis]
         if delete_config:
             del data['INSTANCES'][redis_chassis]
     if 'DATABASES' in data and chassis_db in data['DATABASES']:
         data_keep['DATABASES'] = {}
         data_keep['DATABASES'][chassis_db] = data['DATABASES'][chassis_db]
         if delete_config:
             del data['DATABASES'][chassis_db]
     with open(jsonfile, "w") as write_file:
         data_publish = data_keep if keep_config else data
         if port_number:
             data_publish['INSTANCES']['redis_chassis']['port'] = int(port_number)
         json.dump(data_publish, write_file, indent=4, separators=(',', ': '))
         syslog.syslog(syslog.LOG_INFO,
                       'remove chassis_db from config file {}'.format(jsonfile))
 
 
 if __name__ == "__main__":
     main()