[containercfgd] Add containercfgd and syslog rate limit configuration support (#12489)

* [containercfgd] Add containercfgd and syslog rate limit configuration support

* Fix build issue

* Fix checker issue

* Fix review comment

* Fix review comment

* Update containercfgd.py
Junchao-Mellanox authored 2022-12-09 00:58:35 +08:00; committed by GitHub
parent ffad305fd3
commit 3b3837a636
26 changed files with 559 additions and 28 deletions
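For context on how the new daemon is driven: containercfgd (added below) subscribes to the new SYSLOG_CONFIG_FEATURE table in CONFIG DB, re-renders /etc/rsyslog.conf from rsyslog-container.conf.j2 when the per-container rate limit changes, and restarts rsyslogd via supervisorctl. A minimal sketch of writing such an entry with swsscommon's ConfigDBConnector (illustrative only; the 'swss' key and the 300/20000 values are assumptions, while the table and field names come from the diff below):

from swsscommon.swsscommon import ConfigDBConnector

config_db = ConfigDBConnector()
config_db.connect()
# Illustrative entry: per-container syslog rate limit picked up by containercfgd
# running inside the 'swss' container (interval in seconds, burst in messages).
config_db.set_entry('SYSLOG_CONFIG_FEATURE', 'swss', {
    'rate_limit_interval': '300',
    'rate_limit_burst': '20000',
})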

View File

@@ -81,7 +81,7 @@ RUN pip3 install supervisor==4.2.1
# Add support for supervisord to handle startup dependencies
RUN pip3 install supervisord-dependent-startup==1.4.0
RUN mkdir -p /etc/supervisor /var/log/supervisor
RUN mkdir -p /var/log/supervisor /etc/supervisor/conf.d
RUN apt-get -y purge \
exim4 \
@@ -104,10 +104,10 @@ RUN apt-get clean -y && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/* /tmp/* ~/.cache
COPY ["etc/rsyslog.conf", "/etc/rsyslog.conf"]
COPY ["etc/rsyslog.d/*", "/etc/rsyslog.d/"]
COPY ["root/.vimrc", "/root/.vimrc"]
RUN ln /usr/bin/vim.tiny /usr/bin/vim
COPY ["etc/supervisor/supervisord.conf", "/etc/supervisor/"]
COPY ["etc/supervisor/containercfgd.conf", "/etc/supervisor/conf.d/"]

View File

@@ -0,0 +1,9 @@
[program:containercfgd]
command=python3 /usr/local/bin/containercfgd
priority=99
autostart=false
autorestart=unexpected
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
dependent_startup_wait_for=rsyslogd:running

View File

@@ -94,7 +94,7 @@ RUN pip3 install supervisor==4.2.1
# Add support for supervisord to handle startup dependencies
RUN pip3 install supervisord-dependent-startup==1.4.0
RUN mkdir -p /etc/supervisor /var/log/supervisor
RUN mkdir -p /var/log/supervisor /etc/supervisor/conf.d
RUN apt-get -y purge \
exim4 \
@@ -117,10 +117,10 @@ RUN apt-get clean -y && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/* /tmp/* ~/.cache/
COPY ["etc/rsyslog.conf", "/etc/rsyslog.conf"]
COPY ["etc/rsyslog.d/*", "/etc/rsyslog.d/"]
COPY ["root/.vimrc", "/root/.vimrc"]
RUN ln /usr/bin/vim.tiny /usr/bin/vim
COPY ["etc/supervisor/supervisord.conf", "/etc/supervisor/"]
COPY ["etc/supervisor/containercfgd.conf", "/etc/supervisor/conf.d/"]

View File

@@ -0,0 +1,9 @@
[program:containercfgd]
command=python3 /usr/local/bin/containercfgd
priority=99
autostart=false
autorestart=unexpected
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
dependent_startup_wait_for=rsyslogd:running

View File

@@ -90,7 +90,7 @@ RUN pip install supervisor>=3.4.0
# Add support for supervisord to handle startup dependencies
RUN pip install supervisord-dependent-startup==1.4.0
RUN mkdir -p /etc/supervisor /var/log/supervisor
RUN mkdir -p /var/log/supervisor /etc/supervisor/conf.d
RUN apt-get -y purge \
exim4 \
@@ -113,10 +113,10 @@ RUN apt-get clean -y && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/* /tmp/*
COPY ["etc/rsyslog.conf", "/etc/rsyslog.conf"]
COPY ["etc/rsyslog.d/*", "/etc/rsyslog.d/"]
COPY ["root/.vimrc", "/root/.vimrc"]
RUN ln /usr/bin/vim.tiny /usr/bin/vim
COPY ["etc/supervisor/supervisord.conf", "/etc/supervisor/"]
COPY ["etc/supervisor/containercfgd.conf", "/etc/supervisor/conf.d/"]

View File

@@ -0,0 +1,9 @@
[program:containercfgd]
command=python /usr/local/bin/containercfgd
priority=99
autostart=false
autorestart=unexpected
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
dependent_startup_wait_for=rsyslogd:running

View File

@@ -49,7 +49,6 @@ RUN apt-get -y install \
rsyslog \
less
COPY ["etc/rsyslog.conf", "/etc/rsyslog.conf"]
COPY ["etc/rsyslog.d/*", "/etc/rsyslog.d/"]
COPY ["root/.vimrc", "/root/.vimrc"]
@@ -64,10 +63,11 @@ RUN pip install wheel
# Install supervisor
RUN pip install supervisor>=3.4.0
RUN mkdir -p /etc/supervisor
RUN mkdir -p /etc/supervisor/conf.d
RUN mkdir -p /var/log/supervisor
COPY ["etc/supervisor/supervisord.conf", "/etc/supervisor/"]
COPY ["etc/supervisor/containercfgd.conf", "/etc/supervisor/conf.d/"]
RUN apt-get -y purge \
exim4 \

View File

@@ -0,0 +1,9 @@
[program:containercfgd]
command=python /usr/local/bin/containercfgd
priority=99
autostart=false
autorestart=unexpected
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
dependent_startup_wait_for=rsyslogd:running

View File

@@ -3,6 +3,15 @@ logfile_maxbytes=1MB
logfile_backups=2
nodaemon=true
[eventlistener:dependent-startup]
command=python3 -m supervisord_dependent_startup
autostart=true
autorestart=unexpected
startretries=0
exitcodes=0,3
events=PROCESS_STATE
buffer_size=1024
[eventlistener:supervisor-proc-exit-listener]
command=/usr/bin/supervisor-proc-exit-listener --container-name database
events=PROCESS_STATE_EXITED,PROCESS_STATE_RUNNING
@@ -13,10 +22,11 @@ buffer_size=1024
[program:rsyslogd]
command=/usr/sbin/rsyslogd -n -iNONE
priority=1
autostart=true
autostart=false
autorestart=false
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
{% if INSTANCES %}
{% for redis_inst, redis_items in INSTANCES.items() %}
@@ -28,17 +38,21 @@ stderr_logfile=syslog
{%- endif -%}
command=/bin/bash -c "{ [[ -s /var/lib/{{ redis_inst }}/dump.rdb ]] || rm -f /var/lib/{{ redis_inst }}/dump.rdb; } && mkdir -p /var/lib/{{ redis_inst }} && exec /usr/bin/redis-server /etc/redis/redis.conf --bind {{ LOOPBACK_IP }} {{ redis_items['hostname'] }} --port {{ redis_items['port'] }} --unixsocket {{ redis_items['unix_socket_path'] }} --pidfile /var/run/redis/{{ redis_inst }}.pid --dir /var/lib/{{ redis_inst }}"
priority=2
autostart=true
autostart=false
autorestart=false
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
dependent_startup_wait_for=rsyslogd:running
{% endfor %}
{% endif %}
[program:flushdb]
command=/bin/bash -c "sleep 300 && /usr/local/bin/flush_unused_database"
priority=3
autostart=true
autostart=false
autorestart=false
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
dependent_startup_wait_for=rsyslogd:running

View File

@@ -34,16 +34,23 @@ function updateSyslogConf()
# Also update the container name
if [[ ($NUM_ASIC -gt 1) ]]; then
TARGET_IP=$(docker network inspect bridge --format={{ "'{{(index .IPAM.Config 0).Gateway}}'" }})
else
if [ "$CONTAINER_EXISTS" = "yes" ]; then
# database configuration has been synced to /etc/rsyslog.conf
# no need generate it to save boot time
return
fi
TARGET_IP="127.0.0.1"
fi
CONTAINER_NAME="$DOCKERNAME"
TMP_FILE="/tmp/rsyslog.$CONTAINER_NAME.conf"
{%- if docker_container_name == "database" %}
python -c "import jinja2, os; paths=['/usr/share/sonic/templates']; loader = jinja2.FileSystemLoader(paths); env = jinja2.Environment(loader=loader, trim_blocks=True); template_file='/usr/share/sonic/templates/rsyslog-container.conf.j2'; template = env.get_template(os.path.basename(template_file)); data=template.render({\"target_ip\":\"$TARGET_IP\",\"container_name\":\"$CONTAINER_NAME\"}); print(data)" > $TMP_FILE
{%- else %}
sonic-cfggen -t /usr/share/sonic/templates/rsyslog-container.conf.j2 -a "{\"target_ip\": \"$TARGET_IP\", \"container_name\": \"$CONTAINER_NAME\" }" > $TMP_FILE
sonic-cfggen -d -t /usr/share/sonic/templates/rsyslog-container.conf.j2 -a "{\"target_ip\": \"$TARGET_IP\", \"container_name\": \"$CONTAINER_NAME\" }" > $TMP_FILE
{%- endif %}
docker cp $TMP_FILE ${DOCKERNAME}:/etc/rsyslog.conf
rm -rf $TMP_FILE
fi
}
function ebtables_config()
{
@@ -342,6 +349,7 @@ start() {
DOCKERMOUNT=`getMountPoint "$DOCKERCHECK"`
{%- endif %}
if [ x"$DOCKERMOUNT" == x"$MOUNTPATH" ]; then
CONTAINER_EXISTS="yes"
preStartAction
{%- if docker_container_name == "database" %}
echo "Starting existing ${DOCKERNAME} container"
@@ -536,6 +544,7 @@ start() {
{%- endif %}
$REDIS_MNT \
-v /usr/share/sonic/device/$PLATFORM:/usr/share/sonic/platform:ro \
-v /usr/share/sonic/templates/rsyslog-container.conf.j2:/usr/share/sonic/templates/rsyslog-container.conf.j2:ro \
{%- if sonic_asic_platform != "mellanox" %}
{%- if mount_default_tmpfs|default("n") == "y" %}
--tmpfs /tmp \
@@ -547,6 +556,7 @@ start() {
--env "NAMESPACE_ID"="$DEV" \
--env "NAMESPACE_PREFIX"="$NAMESPACE_PREFIX" \
--env "NAMESPACE_COUNT"=$NUM_ASIC \
--env "CONTAINER_NAME"=$DOCKERNAME \
--name=$DOCKERNAME \
{%- if docker_container_name == "gbsyncd" %}
-v /var/run/docker-syncd$DEV:/var/run/sswsyncd \
@@ -617,6 +627,7 @@ fi
{%- endif %}
NAMESPACE_PREFIX="asic"
DOCKERNAME=$DOCKERNAME$DEV
CONTAINER_EXISTS="no"
if [ "$DEV" ]; then
NET_NS="$NAMESPACE_PREFIX$DEV" #name of the network namespace

View File

@@ -14,8 +14,24 @@ $ModLoad imuxsock # provides support for local system logging
#
# Set a rate limit on messages from the container
#
$SystemLogRateLimitInterval 300
$SystemLogRateLimitBurst 20000
{% if SYSLOG_CONFIG_FEATURE is defined %}
{% if container_name in SYSLOG_CONFIG_FEATURE %}
{% if 'rate_limit_interval' in SYSLOG_CONFIG_FEATURE[container_name]%}
{% set rate_limit_interval = SYSLOG_CONFIG_FEATURE[container_name]['rate_limit_interval'] %}
{% endif %}
{% if 'rate_limit_burst' in SYSLOG_CONFIG_FEATURE[container_name]%}
{% set rate_limit_burst = SYSLOG_CONFIG_FEATURE[container_name]['rate_limit_burst'] %}
{% endif %}
{% endif %}
{% endif %}
{% if rate_limit_interval is defined %}
$SystemLogRateLimitInterval {{ rate_limit_interval }}
{% endif %}
{% if rate_limit_burst is defined %}
$SystemLogRateLimitBurst {{ rate_limit_burst }}
{% endif %}
#$ModLoad imklog # provides kernel logging support
#$ModLoad immark # provides --MARK-- message capability

View File

@@ -12,7 +12,8 @@ $(DOCKER_CONFIG_ENGINE_BULLSEYE)_DEPENDS += $(LIBSWSSCOMMON) \
$(SONIC_EVENTD)
$(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \
$(SONIC_YANG_MGMT_PY3) \
$(SONIC_YANG_MODELS_PY3)
$(SONIC_YANG_MODELS_PY3) \
$(SONIC_CONTAINERCFGD)
$(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_CONFIG_ENGINE_PY3)
$(DOCKER_CONFIG_ENGINE_BULLSEYE)_LOAD_DOCKERS += $(DOCKER_BASE_BULLSEYE)
$(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $(SWSS_VARS_TEMPLATE)

View File

@@ -11,7 +11,8 @@ $(DOCKER_CONFIG_ENGINE_BUSTER)_DEPENDS += $(LIBSWSSCOMMON) \
$(SONIC_DB_CLI)
$(DOCKER_CONFIG_ENGINE_BUSTER)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \
$(SONIC_YANG_MGMT_PY3) \
$(SONIC_YANG_MODELS_PY3)
$(SONIC_YANG_MODELS_PY3) \
$(SONIC_CONTAINERCFGD)
$(DOCKER_CONFIG_ENGINE_BUSTER)_PYTHON_WHEELS += $(SONIC_CONFIG_ENGINE_PY3)
$(DOCKER_CONFIG_ENGINE_BUSTER)_LOAD_DOCKERS += $(DOCKER_BASE_BUSTER)
$(DOCKER_CONFIG_ENGINE_BUSTER)_FILES += $(SWSS_VARS_TEMPLATE)

View File

@@ -0,0 +1,10 @@
SPATH := $($(SONIC_CONTAINERCFGD)_SRC_PATH)
DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/sonic-containercfgd.mk rules/sonic-containercfgd.dep
DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST)
SMDEP_FILES := $(addprefix $(SPATH)/,$(shell cd $(SPATH) && git ls-files))
$(SONIC_CONTAINERCFGD)_CACHE_MODE := GIT_CONTENT_SHA
$(SONIC_CONTAINERCFGD)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST)
$(SONIC_CONTAINERCFGD)_DEP_FILES := $(DEP_FILES)
$(SONIC_CONTAINERCFGD)_SMDEP_FILES := $(SMDEP_FILES)
$(SONIC_CONTAINERCFGD)_SMDEP_PATHS := $(SPATH)

View File

@@ -0,0 +1,8 @@
# sonic-bgpcfgd package
SONIC_CONTAINERCFGD = sonic_containercfgd-1.0-py3-none-any.whl
$(SONIC_CONTAINERCFGD)_SRC_PATH = $(SRC_PATH)/sonic-containercfgd
$(SONIC_CONTAINERCFGD)_DEPENDS += $(SONIC_PY_COMMON_PY3)
$(SONIC_CONTAINERCFGD)_DEBS_DEPENDS = $(LIBSWSSCOMMON) $(PYTHON3_SWSSCOMMON)
$(SONIC_CONTAINERCFGD)_PYTHON_VERSION = 3
SONIC_PYTHON_WHEELS += $(SONIC_CONTAINERCFGD)

src/sonic-containercfgd/.gitignore (vendored, new file)
View File

@@ -0,0 +1,13 @@
# Compiled Python files
*.pyc
# Generated by packaging
*.egg-info/
.eggs/
build/
dist/
# Unit test coverage
.coverage
coverage.xml
htmlcov/

View File

@@ -0,0 +1,198 @@
import os
import re
import signal
import subprocess
import sys

from sonic_py_common import daemon_base, logger
from swsscommon.swsscommon import ConfigDBConnector, RestartWaiter

SYSLOG_IDENTIFIER = "containercfgd"
logger = logger.Logger(SYSLOG_IDENTIFIER)

# Table names
FEATURE_TABLE = 'FEATURE'
SYSLOG_CONFIG_FEATURE_TABLE = 'SYSLOG_CONFIG_FEATURE'

# Table field names
SYSLOG_RATE_LIMIT_INTERVAL = 'rate_limit_interval'
SYSLOG_RATE_LIMIT_BURST = 'rate_limit_burst'

# Container name
container_name = None


def run_command(command):
    """
    Utility function to run an shell command and return the output.
    :param command: Shell command string.
    :return: Output of the shell command.
    """
    return subprocess.check_output(command, text=True, stderr=subprocess.PIPE)


class ContainerConfigDaemon(daemon_base.DaemonBase):
    handlers = {}

    def __init__(self):
        super(ContainerConfigDaemon, self).__init__(SYSLOG_IDENTIFIER)

    def run(self):
        """Register config handlers and listen to CONFIG DB changes
        """
        config_db = ConfigDBConnector()
        config_db.connect(wait_for_init=True, retry_on=True)
        self.log_notice(f'Connected to CONFIG DB')
        for table_name, handler in self.handlers.items():
            config_db.subscribe(table_name, handler.handle_config)
        config_db.listen(init_data_handler=self.init_data_handler)

    def init_data_handler(self, init_data):
        """Handle initial data in CONFIG DB

        Args:
            init_data (dict): Initial data when first time connecting to CONFIG DB. {<table_name>: {<field_name>: <field_value>}}
        """
        for handler in self.handlers.values():
            handler.handle_init_data(init_data)

    @classmethod
    def register_handler(cls, table_name, object_type):
        """Register CONFIG DB handler

        Args:
            table_name (str): CONFIG DB table name
            object_type (class): Class of CONFIG DB handler
        """
        cls.handlers[table_name] = object_type()

    def signal_handler(self, sig, frame):
        if sig == signal.SIGHUP:
            self.log_info("ContainerCfgd: Caught SIGHUP - ignoring...")
        elif sig == signal.SIGINT:
            self.log_info("ContainerCfgd: Caught SIGINT - exiting...")
            sys.exit(128 + sig)
        elif sig == signal.SIGTERM:
            self.log_info("ContainerCfgd: Caught SIGTERM - exiting...")
            sys.exit(128 + sig)
        else:
            self.log_warning("ContainerCfgd: Caught unhandled signal '{}'".format(sig))


def config_handler(table_name):
    """Decorator to register CONFIG DB handler

    Args:
        table_name (str): CONFIG DB table name
    """
    def wrapper(object_type):
        ContainerConfigDaemon.register_handler(table_name, object_type)
        return object_type
    return wrapper


@config_handler(SYSLOG_CONFIG_FEATURE_TABLE)
class SyslogHandler:
    # syslog conf file path in docker
    SYSLOG_CONF_PATH = '/etc/rsyslog.conf'
    # temporary syslog conf file path in docker
    TMP_SYSLOG_CONF_PATH = '/tmp/rsyslog.conf'

    # Regular expressions to extract value from rsyslog.conf
    INTERVAL_PATTERN = '.*SystemLogRateLimitInterval\s+(\d+).*'
    BURST_PATTERN = '.*SystemLogRateLimitBurst\s+(\d+).*'
    TARGET_IP_PATTERN = '.*target="(.*?)".*'

    def __init__(self):
        self.current_interval, self.current_burst, self.target_ip = self.parse_syslog_conf()

    def handle_config(self, table, key, data):
        """Handle CONFIG DB change. Callback by ConfigDBConnector.

        Args:
            table (str): CONFIG DB table name
            key (str): Key of the changed entry
            data (dict): Data of the entry: {<field_name>: <field_value>}
        """
        try:
            if key != container_name:
                return
            self.update_syslog_config(data)
        except Exception as e:
            logger.log_error('Failed to config syslog for container {} with data {} - {}'.format(key, data, e))

    def handle_init_data(self, init_data):
        """Handle initial data in CONFIG DB. Callback by ConfigDBConnector.

        Args:
            init_data (dict): Initial data when first time connecting to CONFIG DB. {<table_name>: {<field_name>: <field_value>}}
        """
        if SYSLOG_CONFIG_FEATURE_TABLE in init_data:
            if container_name in init_data[SYSLOG_CONFIG_FEATURE_TABLE]:
                self.update_syslog_config(init_data[SYSLOG_CONFIG_FEATURE_TABLE][container_name])

    def update_syslog_config(self, data):
        """Parse existing syslog conf and apply new syslog conf.

        Args:
            data (dict): Data of the entry: {<field_name>: <field_value>}
        """
        new_interval = '0' if not data else data.get(SYSLOG_RATE_LIMIT_INTERVAL, '0')
        new_burst = '0' if not data else data.get(SYSLOG_RATE_LIMIT_BURST, '0')

        if new_interval == self.current_interval and new_burst == self.current_burst:
            logger.log_notice('Syslog rate limit configuration does not change, ignore it')
            return

        logger.log_notice(f'Configure syslog rate limit interval={new_interval}, burst={new_burst}')
        if os.path.exists(self.TMP_SYSLOG_CONF_PATH):
            os.remove(self.TMP_SYSLOG_CONF_PATH)
        with open(self.TMP_SYSLOG_CONF_PATH, 'w+') as f:
            json_args = f'{{"target_ip": "127.0.0.1", "{self.target_ip}": "{container_name}" }}'
            output = run_command(['sonic-cfggen', '-d', '-t', '/usr/share/sonic/templates/rsyslog-container.conf.j2', '-a', json_args])
            f.write(output)
        run_command(['cp', self.TMP_SYSLOG_CONF_PATH, self.SYSLOG_CONF_PATH])
        run_command(['supervisorctl', 'restart', 'rsyslogd'])

        self.current_interval = new_interval
        self.current_burst = new_burst

    def parse_syslog_conf(self):
        """Passe existing syslog conf and extract config values

        Returns:
            tuple: interval,burst,target_ip
        """
        interval = '0'
        burst = '0'
        target_ip = None
        with open(self.SYSLOG_CONF_PATH, 'r') as f:
            content = f.read()
            pattern = re.compile(self.INTERVAL_PATTERN)
            for match in pattern.finditer(content):
                interval = match.group(1)
                break
            pattern = re.compile(self.BURST_PATTERN)
            for match in pattern.finditer(content):
                burst = match.group(1)
                break
            pattern = re.compile(self.TARGET_IP_PATTERN)
            for match in pattern.finditer(content):
                target_ip = match.group(1)
                break
        return interval, burst, target_ip


def main():
    RestartWaiter.waitAdvancedBootDone()
    global container_name
    container_name = os.environ['CONTAINER_NAME']
    daemon = ContainerConfigDaemon()
    daemon.run()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,2 @@
[pytest]
addopts = --cov=containercfgd --cov-report html --cov-report term --cov-report xml

View File

@@ -0,0 +1,2 @@
[aliases]
test=pytest

View File

@@ -0,0 +1,48 @@
from setuptools import setup

dependencies = [
    'sonic_py_common',
]

setup(
    name='sonic-containercfgd',
    version='1.0',
    description='SONiC container config daemon package',
    license='Apache 2.0',
    author='SONiC Team',
    author_email='linuxnetdev@microsoft.com',
    url='https://github.com/Azure/sonic-buildimage',
    maintainer='Junchao Chen',
    maintainer_email='junchaow@nvidia.com',
    install_requires=dependencies,
    entry_points={
        'console_scripts': [
            'containercfgd = containercfgd.containercfgd:main',
        ]
    },
    packages=[
        'containercfgd',
        'tests'
    ],
    setup_requires=[
        'pytest-runner'
    ],
    tests_require=[
        'pytest',
        'mock>=2.0.0'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: No Input/Output (Daemon)',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.7',
        'Topic :: System :: Hardware',
    ],
    keywords='SONiC sonic container config daemon',
    test_suite='setup.get_test_suite'
)

View File

@@ -0,0 +1,7 @@
import os


def init_env():
    # Just make sure there is an environment variable CONTAINER_NAME
    # The value of this environment variable is not important
    os.environ["CONTAINER_NAME"] = "some_container"

View File

@@ -0,0 +1,29 @@
$ModLoad imuxsock # provides support for local system logging
#
# Set a rate limit on messages from the container
#
$SystemLogRateLimitInterval 50
$SystemLogRateLimitBurst 10002
#$ModLoad imklog # provides kernel logging support
#$ModLoad immark # provides --MARK-- message capability
# provides UDP syslog reception
#$ModLoad imudp
#$UDPServerRun 514
# provides TCP syslog reception
#$ModLoad imtcp
#$InputTCPServerRun 514
###########################
#### GLOBAL DIRECTIVES ####
###########################
# Set remote syslog server
template (name="ForwardFormatInContainer" type="string" string="<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% pmon#%syslogtag%%msg:::sp-if-no-1st-sp%%msg%")
*.* action(type="omfwd" target="127.0.0.1" port="514" protocol="udp" Template="ForwardFormatInContainer")

View File

@@ -0,0 +1,54 @@
import os
import sys
from unittest import mock

test_path = os.path.dirname(os.path.abspath(__file__))
modules_path = os.path.dirname(test_path)
sys.path.insert(0, modules_path)

from containercfgd import containercfgd


@containercfgd.config_handler('MockTable')
class MockHandler:
    def handle_init_data(self, init_data):
        pass

    def handle_config(self, table, key, data):
        pass


def test_handler_register():
    assert 'MockTable' in containercfgd.ContainerConfigDaemon.handlers
    assert isinstance(containercfgd.ContainerConfigDaemon.handlers['MockTable'], MockHandler)


def test_init_data_handler():
    mock_handler_cls = mock.MagicMock()
    mock_handler_instance = mock.MagicMock()
    mock_handler_instance.handle_init_data = mock.MagicMock()
    mock_handler_cls.return_value = mock_handler_instance
    containercfgd.ContainerConfigDaemon.register_handler('LoadMock', mock_handler_cls)
    daemon = containercfgd.ContainerConfigDaemon()
    daemon.init_data_handler({})
    mock_handler_instance.handle_init_data.assert_called_once()
    containercfgd.ContainerConfigDaemon.handlers.pop('LoadMock')


@mock.patch('containercfgd.containercfgd.ConfigDBConnector')
def test_run(mock_connector):
    mock_db = mock.MagicMock()
    mock_db.connect = mock.MagicMock()
    mock_db.subscribe = mock.MagicMock()
    mock_db.listen = mock.MagicMock()
    mock_connector.return_value = mock_db
    daemon = containercfgd.ContainerConfigDaemon()
    daemon.run()
    mock_db.connect.assert_called_once()
    expected = []
    for table_name, handler in containercfgd.ContainerConfigDaemon.handlers.items():
        expected.append(mock.call(table_name, handler.handle_config))
    mock_db.subscribe.assert_has_calls(expected, any_order=True)
    mock_db.listen.assert_called_once()

View File

@@ -0,0 +1,81 @@
import os
import sys
from unittest import mock

test_path = os.path.dirname(os.path.abspath(__file__))
modules_path = os.path.dirname(test_path)
sys.path.insert(0, modules_path)

from containercfgd import containercfgd

containercfgd.container_name = 'swss'


def test_handle_config():
    handler = containercfgd.SyslogHandler()
    handler.update_syslog_config = mock.MagicMock()
    handler.handle_config(containercfgd.SYSLOG_CONFIG_FEATURE_TABLE,
                          'bgp',
                          None)
    handler.update_syslog_config.assert_not_called()

    handler.handle_config(containercfgd.SYSLOG_CONFIG_FEATURE_TABLE,
                          'swss',
                          None)
    handler.update_syslog_config.assert_called_once()

    handler.update_syslog_config.side_effect = Exception('')
    handler.handle_config(containercfgd.SYSLOG_CONFIG_FEATURE_TABLE,
                          'swss',
                          None)


def test_handle_init_data():
    handler = containercfgd.SyslogHandler()
    handler.update_syslog_config = mock.MagicMock()

    init_data = {}
    handler.handle_init_data(init_data)
    handler.update_syslog_config.assert_not_called()

    init_data = {containercfgd.SYSLOG_CONFIG_FEATURE_TABLE: {}}
    handler.handle_init_data(init_data)
    handler.update_syslog_config.assert_not_called()

    init_data = {containercfgd.SYSLOG_CONFIG_FEATURE_TABLE: {'swss': {}}}
    handler.handle_init_data(init_data)
    handler.update_syslog_config.assert_called_once()


@mock.patch('containercfgd.containercfgd.run_command')
@mock.patch('containercfgd.containercfgd.SyslogHandler.parse_syslog_conf', mock.MagicMock(return_value=('100', '200', '127.0.0.1')))
def test_update_syslog_config(mock_run_cmd):
    mock_run_cmd.return_value = ""
    handler = containercfgd.SyslogHandler()
    data = {containercfgd.SYSLOG_RATE_LIMIT_INTERVAL: '100',
            containercfgd.SYSLOG_RATE_LIMIT_BURST: '200'}
    handler.update_syslog_config(data)
    mock_run_cmd.assert_not_called()

    data = {containercfgd.SYSLOG_RATE_LIMIT_INTERVAL: '200',
            containercfgd.SYSLOG_RATE_LIMIT_BURST: '200'}
    handler.update_syslog_config(data)
    mock_run_cmd.assert_called()


def test_parse_syslog_conf():
    handler = containercfgd.SyslogHandler()
    handler.SYSLOG_CONF_PATH = os.path.join(test_path, 'mock_rsyslog.conf')
    interval, burst, target_ip = handler.parse_syslog_conf()
    assert interval == '50'
    assert burst == '10002'
    assert target_ip == '127.0.0.1'

    handler.SYSLOG_CONF_PATH = os.path.join(test_path, 'mock_empty_rsyslog.conf')
    interval, burst, target_ip = handler.parse_syslog_conf()
    assert interval == '0'
    assert burst == '0'
    assert target_ip is None