[quagga]: update quagga submodule (#1698)
* [quagga]: update quagga submodule to 0bc6bd6 (2018-05-11): ignore nexthop attribute when NLRI is present (#18) (HEAD, origin/debian/0.99.24.1, origin/HEAD) [lguohan]

Signed-off-by: Guohan Lu <gulv@microsoft.com>

* add vs bgp test

Signed-off-by: Guohan Lu <gulv@microsoft.com>
This commit is contained in:
parent
e900369fa3
commit
931b5dee13
@ -44,7 +44,7 @@ sw-srv0 (id: 5)
|
||||
2. Start sonic virtual switch docker
|
||||
|
||||
```
|
||||
$ docker run --privileged --network container:sw -d docker-sonic-vs
|
||||
$ docker run --privileged --network container:sw --name vs -d docker-sonic-vs
|
||||
```
|
||||
|
||||
3. Setup IP in the virtual switch docker
|
||||
|
4
platform/vs/tests/README.md
Normal file
4
platform/vs/tests/README.md
Normal file
@ -0,0 +1,4 @@
|
||||
Requirements:
|
||||
|
||||
- Enable IPv6 for docker engine
|
||||
- pip install exabgp
|
7
platform/vs/tests/bgp/files/bgpd.conf
Normal file
7
platform/vs/tests/bgp/files/bgpd.conf
Normal file
@ -0,0 +1,7 @@
|
||||
router bgp 65501
|
||||
bgp router-id 1.1.1.1
|
||||
no bgp default ipv4-unicast
|
||||
neighbor fc00::2 remote-as 65502
|
||||
address-family ipv6
|
||||
neighbor fc00::2 activate
|
||||
exit-address-family
|
19
platform/vs/tests/bgp/files/invalid_nexthop.conf
Normal file
19
platform/vs/tests/bgp/files/invalid_nexthop.conf
Normal file
@ -0,0 +1,19 @@
|
||||
neighbor fc00::1 {
|
||||
router-id 1.2.3.4;
|
||||
local-address fc00::2;
|
||||
local-as 65502;
|
||||
peer-as 65501;
|
||||
group-updates false;
|
||||
|
||||
family {
|
||||
ipv4 unicast;
|
||||
ipv6 unicast;
|
||||
}
|
||||
|
||||
static {
|
||||
route 3333::0/64 {
|
||||
next-hop 0.0.0.0;
|
||||
next-hop fc00::2;
|
||||
}
|
||||
}
|
||||
}
|
32
platform/vs/tests/bgp/test_invalid_nexthop.py
Normal file
32
platform/vs/tests/bgp/test_invalid_nexthop.py
Normal file
@ -0,0 +1,32 @@
|
||||
from swsscommon import swsscommon
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import json
|
||||
|
||||
def test_InvalidNexthop(dvs):
    """Verify bgpd tolerates a bogus IPv4 NEXT_HOP on an IPv6 route.

    exabgp announces 3333::/64 with both an invalid IPv4 next-hop
    (0.0.0.0) and a valid IPv6 one (fc00::2); the patched quagga must
    ignore the NEXT_HOP attribute when an MP_REACH_NLRI is present and
    still accept the prefix.

    :param dvs: DockerVirtualSwitch fixture (see conftest.py).
    """
    # configure and start bgpd inside the virtual switch
    dvs.copy_file("/etc/quagga/", "bgp/files/bgpd.conf")
    dvs.runcmd("supervisorctl start bgpd")
    dvs.runcmd("ip addr add fc00::1/126 dev Ethernet0")
    dvs.runcmd("ifconfig Ethernet0 up")

    # give the peer side of the /126 to the first virtual server
    dvs.servers[0].runcmd("ip addr add fc00::2/126 dev eth0")
    dvs.servers[0].runcmd("ifconfig eth0 up")

    # allow bgpd to come up before probing its status
    time.sleep(5)

    print(dvs.runcmd("supervisorctl status"))

    # run exabgp as the remote peer announcing the malformed route
    p = dvs.servers[0].runcmd_async("exabgp -d bgp/files/invalid_nexthop.conf")
    try:
        # allow the BGP session to establish and the update to propagate
        time.sleep(10)
        output = dvs.runcmd(["vtysh", "-c", "show ipv6 bgp"])
    finally:
        # always reap exabgp, even if a dvs command raised above
        p.terminate()
        p.wait()

    print(output)

    # the announced prefix must appear in the IPv6 BGP table
    assert "3333::/64" in output
|
231
platform/vs/tests/conftest.py
Normal file
231
platform/vs/tests/conftest.py
Normal file
@ -0,0 +1,231 @@
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import time
|
||||
import docker
|
||||
import pytest
|
||||
import commands
|
||||
import tarfile
|
||||
import StringIO
|
||||
import subprocess
|
||||
from swsscommon import swsscommon
|
||||
|
||||
def pytest_addoption(parser):
    """Pytest hook: register the --dvsname CLI option.

    When --dvsname is given, tests attach to an already-running virtual
    switch container of that name instead of creating a fresh one
    (see DockerVirtualSwitch.__init__).
    """
    parser.addoption("--dvsname", action="store", default=None,
                     help="dvs name")
|
||||
|
||||
class AsicDbValidator(object):
    """Snapshot of the virtual switch ASIC_DB taken at startup.

    Asserts the expected baseline state (one default VLAN, 32 host
    interfaces, one default ACL table with two entries) and builds
    port-oid <-> port-name lookup tables for use by tests.
    """

    def __init__(self, dvs):
        # ASIC_DB is redis database index 1 on the vs redis socket
        self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0)

        # get default dot1q vlan id
        atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")

        keys = atbl.getKeys()
        # exactly one VLAN object is expected: the default dot1q VLAN
        assert len(keys) == 1
        self.default_vlan_id = keys[0]

        # build port oid to front port name mapping
        self.portoidmap = {}      # SAI port oid -> interface name
        self.portnamemap = {}     # interface name -> SAI port oid
        self.hostifoidmap = {}    # SAI hostif oid -> interface name
        self.hostifnamemap = {}   # interface name -> SAI hostif oid
        atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF")
        keys = atbl.getKeys()

        # the vs platform exposes 32 front-panel ports, one hostif each
        assert len(keys) == 32
        for k in keys:
            (status, fvs) = atbl.get(k)

            assert status == True

            # pick the port oid and interface name out of the hostif
            # attribute list; both attrs are present on every entry
            for fv in fvs:
                if fv[0] == "SAI_HOSTIF_ATTR_OBJ_ID":
                    port_oid = fv[1]
                elif fv[0] == "SAI_HOSTIF_ATTR_NAME":
                    port_name = fv[1]

            self.portoidmap[port_oid] = port_name
            self.portnamemap[port_name] = port_oid
            self.hostifoidmap[k] = port_name
            self.hostifnamemap[port_name] = k

        # get default acl table and acl rules
        atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE")
        keys = atbl.getKeys()

        assert len(keys) == 1
        self.default_acl_table = keys[0]

        atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY")
        keys = atbl.getKeys()

        # NOTE(review): assumes exactly two default ACL entries exist at
        # startup — confirm against the orchagent default config
        assert len(keys) == 2
        self.default_acl_entries = keys
|
||||
|
||||
class VirtualServer(object):
    """A network namespace acting as a traffic peer for one front-panel port.

    Each server is a netns (``<ctn_name>-srv<i>``) connected to the
    virtual switch container through a veth pair: the namespace end is
    renamed ``eth0``, the switch end is ``vEthernet<i*4>`` and is moved
    into the switch container's network namespace (identified by pid).

    :param ctn_name: name of the base (network) container.
    :param pid: host pid of the base container, used with nsenter/netns.
    :param i: zero-based server index; selects the port (Ethernet<i*4>).
    """

    def __init__(self, ctn_name, pid, i):
        self.nsname = "%s-srv%d" % (ctn_name, i)
        self.vifname = "vEthernet%d" % (i * 4)
        self.cleanup = True

        # create netns; if it already exists we are attaching to a
        # pre-existing setup and must not tear it down on __del__
        if os.path.exists("/var/run/netns/%s" % self.nsname):
            self.cleanup = False
        else:
            os.system("ip netns add %s" % self.nsname)

        # create vpeer link; the netns-side name is truncated to 12
        # chars to stay within the kernel IFNAMSIZ interface-name limit
        os.system("ip link add %s type veth peer name %s" % (self.nsname[0:12], self.vifname))
        os.system("ip link set %s netns %s" % (self.nsname[0:12], self.nsname))
        os.system("ip link set %s netns %d" % (self.vifname, pid))

        # bring up link in the virtual server; rename to eth0 and
        # disable tx checksum offload (veth + offload corrupts checksums)
        os.system("ip netns exec %s ip link set dev %s name eth0" % (self.nsname, self.nsname[0:12]))
        os.system("ip netns exec %s ip link set dev eth0 up" % (self.nsname))
        os.system("ip netns exec %s ethtool -K eth0 tx off" % (self.nsname))

        # bring up link in the virtual switch
        os.system("nsenter -t %d -n ip link set dev %s up" % (pid, self.vifname))

    def __del__(self):
        # deleting the netns implicitly destroys the veth pair
        if self.cleanup:
            os.system("ip netns delete %s" % self.nsname)

    def runcmd(self, cmd):
        """Run *cmd* inside the server's netns.

        Returns the os.system() exit status so callers can check for
        failure (previously the status was silently discarded).
        """
        return os.system("ip netns exec %s %s" % (self.nsname, cmd))

    def runcmd_async(self, cmd):
        """Start *cmd* inside the netns without waiting; return the Popen."""
        return subprocess.Popen("ip netns exec %s %s" % (self.nsname, cmd), shell=True)
|
||||
|
||||
class DockerVirtualSwitch(object):
    """Handle on a docker-sonic-vs virtual switch used by the tests.

    Two modes:

    * ``name`` given  -- attach to an already-running vs container of
      that name, discover its base (network) container, and restart it;
      nothing is removed on destroy().
    * ``name`` is None -- create a fresh debian:jessie base container
      (network namespace holder) plus a docker-sonic-vs container that
      shares its network, and remove both on destroy().

    In both modes 32 VirtualServer peers are wired to the switch and
    the redis database is exposed on the host at /var/run/redis-vs.
    """

    def __init__(self, name=None):
        # supervisord process names that must be RUNNING before the
        # switch is considered ready (see check_ready)
        self.pnames = ['fpmsyncd',
                       'intfmgrd',
                       'intfsyncd',
                       'neighsyncd',
                       'orchagent',
                       'portsyncd',
                       'redis-server',
                       'rsyslogd',
                       'syncd',
                       'teamsyncd',
                       'vlanmgrd',
                       'zebra']
        # host-side bind mount for the container's /var/run/redis
        self.mount = "/var/run/redis-vs"
        self.redis_sock = self.mount + '/' + "redis.sock"
        self.client = docker.from_env()

        self.ctn = None
        self.cleanup = True  # False when attaching to an existing switch
        if name != None:
            # get virtual switch container
            for ctn in self.client.containers.list():
                if ctn.name == name:
                    self.ctn = ctn
                    # NetworkMode is "container:<id>" — the part after
                    # the colon identifies the base network container
                    (status, output) = commands.getstatusoutput("docker inspect --format '{{.HostConfig.NetworkMode}}' %s" % name)
                    ctn_sw_id = output.split(':')[1]
                    self.cleanup = False
            if self.ctn == None:
                raise NameError("cannot find container %s" % name)

            # get base container
            for ctn in self.client.containers.list():
                if ctn.id == ctn_sw_id or ctn.name == ctn_sw_id:
                    ctn_sw_name = ctn.name

            (status, output) = commands.getstatusoutput("docker inspect --format '{{.State.Pid}}' %s" % ctn_sw_name)
            self.ctn_sw_pid = int(output)

            # create virtual servers
            self.servers = []
            for i in range(32):
                server = VirtualServer(ctn_sw_name, self.ctn_sw_pid, i)
                self.servers.append(server)

            # restart so the switch picks up the freshly wired ports
            self.restart()
        else:
            # base container only holds the shared network namespace
            self.ctn_sw = self.client.containers.run('debian:jessie', privileged=True, detach=True,
                    command="bash", stdin_open=True)
            (status, output) = commands.getstatusoutput("docker inspect --format '{{.State.Pid}}' %s" % self.ctn_sw.name)
            self.ctn_sw_pid = int(output)

            # create virtual server
            self.servers = []
            for i in range(32):
                server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i)
                self.servers.append(server)

            # create virtual switch container sharing the base
            # container's network namespace
            self.ctn = self.client.containers.run('docker-sonic-vs', privileged=True, detach=True,
                    network_mode="container:%s" % self.ctn_sw.name,
                    volumes={ self.mount: { 'bind': '/var/run/redis', 'mode': 'rw' } })

        # IPv6 is needed for the BGP tests; dockerd disables it by default
        self.ctn.exec_run("sysctl -w net.ipv6.conf.all.disable_ipv6=0")
        self.check_ready()
        self.init_asicdb_validator()

    def destroy(self):
        """Remove the containers created by this instance (if any)."""
        if self.cleanup:
            self.ctn.remove(force=True)
            self.ctn_sw.remove(force=True)
            # NOTE(review): del(s) only unbinds the loop-local name;
            # self.servers still references every VirtualServer, so
            # __del__ (and the netns cleanup) is not triggered here —
            # cleanup actually happens when the fixture object is
            # garbage-collected. Confirm whether this loop is intended.
            for s in self.servers:
                del(s)

    def check_ready(self, timeout=30):
        '''check if all processes in the dvs is ready'''

        re_space = re.compile('\s+')
        process_status = {}
        ready = False
        started = 0
        while True:
            # get process status — supervisorctl output is one
            # "<name> <state> ..." line per managed process
            out = self.ctn.exec_run("supervisorctl status")
            for l in out.split('\n'):
                fds = re_space.split(l)
                if len(fds) < 2:
                    continue
                process_status[fds[0]] = fds[1]

            # check if all processes are running
            ready = True
            for pname in self.pnames:
                try:
                    if process_status[pname] != "RUNNING":
                        ready = False
                except KeyError:
                    # process not listed yet counts as not ready
                    ready = False

            if ready == True:
                break

            # give up after ~timeout polls (1s apart), reporting the
            # last supervisorctl output for diagnosis
            started += 1
            if started > timeout:
                raise ValueError(out)

            time.sleep(1)

    def restart(self):
        """Restart the virtual switch container."""
        self.ctn.restart()

    def init_asicdb_validator(self):
        """Take the baseline ASIC_DB snapshot (see AsicDbValidator)."""
        self.asicdb = AsicDbValidator(self)

    def runcmd(self, cmd):
        """Run *cmd* inside the switch container; return its output."""
        return self.ctn.exec_run(cmd)

    def copy_file(self, path, filename):
        """Copy local *filename* into the container directory *path*.

        put_archive only accepts tar data, so the file is wrapped in an
        in-memory tar archive first.
        """
        tarstr = StringIO.StringIO()
        tar = tarfile.open(fileobj=tarstr, mode="w")
        tar.add(filename, os.path.basename(filename))
        tar.close()
        self.ctn.put_archive(path, tarstr.getvalue())
        tarstr.close()
|
||||
|
||||
@pytest.yield_fixture(scope="module")
def dvs(request):
    """Module-scoped virtual switch fixture.

    Attaches to the container named by --dvsname when given, otherwise
    spins up a fresh docker-sonic-vs instance; tears it down after the
    module's tests complete.
    """
    switch = DockerVirtualSwitch(request.config.getoption("--dvsname"))
    yield switch
    switch.destroy()
|
@ -1 +1 @@
|
||||
Subproject commit e1217a5269990926dff30624063975673b1da065
|
||||
Subproject commit 0bc6bd6b208e3701df89c3e231c48f3bdb3d046f
|
Reference in New Issue
Block a user