Merge pull request #169 from LorenzoBianconi/ovn-ic
ovn-tester: introduce ovn-ic
dceara authored Oct 19, 2023
2 parents eb0ab8e + dc56375 commit c43ef3d
Showing 21 changed files with 666 additions and 326 deletions.
3 changes: 2 additions & 1 deletion .cirrus.yml
@@ -71,8 +71,9 @@ low_scale_task:
 
   test_script:
     - 'sed -i "s/^ log_cmds\: False/ log_cmds\: True/"
-      test-scenarios/ovn-low-scale.yml'
+      test-scenarios/ovn-low-scale*.yml'
     - ./do.sh run test-scenarios/ovn-low-scale.yml low-scale
+    - ./do.sh run test-scenarios/ovn-low-scale-ic.yml low-scale-ic
 
   check_logs_script:
     - ./utils/logs-checker.sh
2 changes: 1 addition & 1 deletion do.sh
@@ -454,7 +454,7 @@ function run_test() {
 
     cluster_vars=""
     for var in enable_ssl clustered_db monitor_all use_ovsdb_etcd \
-               node_net datapath_type n_relays n_workers; do
+               node_net datapath_type n_relays n_workers n_az; do
         cluster_vars="${cluster_vars} $(get_cluster_var ${test_file} ${var})"
     done
     echo "-- Cluster vars: ${cluster_vars}"
14 changes: 13 additions & 1 deletion ovn-fake-multinode-utils/playbooks/bringup-cluster.yml
@@ -20,8 +20,19 @@
 - name: Bring up central nodes
   hosts: central_hosts
   tasks:
+    - name: Compute central facts (standalone)
+      when: clustered_db == "no"
+      ansible.builtin.set_fact:
+        n_ips: '{{ n_relays|int + 1 }}'
+
+    - name: Compute central facts (clustered)
+      when: clustered_db == "yes"
+      ansible.builtin.set_fact:
+        n_ips: '{{ n_relays|int + 3 }}'
+
     - name: Start central containers
       environment:
+        CENTRAL_COUNT: '{{ n_az }}'
         CHASSIS_COUNT: 0
         CREATE_FAKE_VMS: no
         ENABLE_ETCD: '{{ use_ovsdb_etcd }}'
@@ -46,14 +57,15 @@
   tasks:
     - name: Start worker containers
       environment:
+        CENTRAL_COUNT: '{{ n_az }}'
         CHASSIS_COUNT: 0
         CREATE_FAKE_VMS: no
         ENABLE_ETCD: '{{ use_ovsdb_etcd }}'
         ENABLE_SSL: '{{ enable_ssl }}'
         GW_COUNT: 0
         IP_CIDR: "{{ node_net|ansible.utils.ipaddr('prefix') }}"
         IP_HOST: "{{ node_net|ansible.utils.ipaddr('network') }}"
-        IP_START: "{{ node_net|ansible.utils.ipmath(2 + item|int) }}"
+        IP_START: "{{ node_net|ansible.utils.ipmath(2 + n_az|int * n_ips|int + item|int) }}"
        OVN_DB_CLUSTER: '{{ clustered_db }}'
        OVN_DP_TYPE: '{{ datapath_type }}'
        OVN_MONITOR_ALL: '{{ monitor_all }}'
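The reworked IP_START expression reserves one block of management-network addresses per availability zone before any worker address is handed out: each AZ's central containers consume n_ips addresses (n_relays + 3 with a clustered database, n_relays + 1 standalone). A minimal Python sketch of that arithmetic; the helper name and the example node_net value are illustrative, not part of the playbook:

```python
import ipaddress

def worker_ip_start(node_net, n_az, n_relays, clustered, item):
    # Mirrors the playbook expression ipmath(2 + n_az * n_ips + item),
    # where n_ips is n_relays + 3 (clustered DB) or n_relays + 1.
    n_ips = n_relays + (3 if clustered else 1)
    base = ipaddress.ip_network(node_net).network_address
    return base + 2 + n_az * n_ips + item

# Two clustered-DB AZs, no relays: offsets 2..7 go to the central
# containers, so the first worker container (item=0) gets offset 8.
print(worker_ip_start("192.16.0.0/16", n_az=2, n_relays=0,
                      clustered=True, item=0))  # 192.16.0.8
```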
3 changes: 3 additions & 0 deletions ovn-fake-multinode-utils/translate_yaml.py
@@ -106,10 +106,13 @@ class ClusterConfig:
     external_net6: str = "3::/64"
     gw_net: str = "2.0.0.0/16"
     gw_net6: str = "2::/64"
+    ts_net: str = "30.0.0.0/16"
+    ts_net6: str = "30::/64"
     cluster_net: str = "16.0.0.0/4"
     cluster_net6: str = "16::/32"
     n_workers: int = 2
     n_relays: int = 0
+    n_az: int = 1
     vips: Dict = None
     vips6: Dict = None
     vip_subnet: str = "4.0.0.0/8"
20 changes: 11 additions & 9 deletions ovn-tester/cms/ovn_kubernetes/ovn_kubernetes.py
@@ -8,21 +8,22 @@
 
 class OVNKubernetes:
     @staticmethod
-    def add_cluster_worker_nodes(cluster, workers):
+    def add_cluster_worker_nodes(cluster, workers, az):
         cluster_cfg = cluster.cluster_cfg
 
         # Allocate worker IPs after central and relay IPs.
         mgmt_ip = (
             cluster_cfg.node_net.ip
             + 2
-            + len(cluster.central_nodes)
-            + len(cluster.relay_nodes)
+            + cluster_cfg.n_az
+            * (len(cluster.central_nodes) + len(cluster.relay_nodes))
         )
 
         protocol = "ssl" if cluster_cfg.enable_ssl else "tcp"
         internal_net = cluster_cfg.internal_net
         external_net = cluster_cfg.external_net
-        gw_net = cluster_cfg.gw_net
+        # Number of workers for each az
+        n_az_workers = cluster_cfg.n_workers // cluster_cfg.n_az
         cluster.add_workers(
             [
                 WorkerNode(
@@ -32,14 +33,15 @@ def add_cluster_worker_nodes(cluster, workers):
                     protocol,
                     DualStackSubnet.next(internal_net, i),
                     DualStackSubnet.next(external_net, i),
-                    gw_net,
+                    cluster.gw_net,
                     i,
                 )
-                for i in range(cluster_cfg.n_workers)
+                for i in range(az * n_az_workers, (az + 1) * n_az_workers)
             ]
         )
 
     @staticmethod
-    def prepare_test(cluster):
-        with Context(cluster, 'prepare_test'):
-            cluster.start()
+    def prepare_test(clusters):
+        with Context(clusters, 'prepare_test clusters'):
+            for c in clusters:
+                c.start()
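add_cluster_worker_nodes is now called once per availability zone and provisions only that zone's slice of the global worker list. A standalone sketch of the slicing logic; the function name and example values are illustrative, not tester code:

```python
def az_worker_ranges(n_workers, n_az):
    # Mirrors add_cluster_worker_nodes: AZ `az` owns worker indices
    # [az * n_az_workers, (az + 1) * n_az_workers).
    n_az_workers = n_workers // n_az
    return {
        az: range(az * n_az_workers, (az + 1) * n_az_workers)
        for az in range(n_az)
    }

# n_workers=4, n_az=2 -> {0: range(0, 2), 1: range(2, 4)}.
# Note the floor division: with n_workers=5 and n_az=2 the last worker
# falls outside every range, so n_workers should stay a multiple of n_az.
print(az_worker_ranges(4, 2))
```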
94 changes: 76 additions & 18 deletions ovn-tester/cms/ovn_kubernetes/tests/base_cluster_bringup.py
@@ -1,33 +1,91 @@
 from collections import namedtuple
 
+from randmac import RandMac
+from ovn_utils import LSwitch
 from ovn_context import Context
 from ovn_ext_cmd import ExtCmd
 
 ClusterBringupCfg = namedtuple('ClusterBringupCfg', ['n_pods_per_node'])
 
 
 class BaseClusterBringup(ExtCmd):
-    def __init__(self, config, cluster, global_cfg):
-        super().__init__(config, cluster)
+    def __init__(self, config, clusters, global_cfg):
+        super().__init__(config, clusters)
         test_config = config.get('base_cluster_bringup', dict())
         self.config = ClusterBringupCfg(
             n_pods_per_node=test_config.get('n_pods_per_node', 0),
         )
 
-    def run(self, ovn, global_cfg):
-        # create ovn topology
-        with Context(
-            ovn, 'base_cluster_bringup', len(ovn.worker_nodes)
-        ) as ctx:
-            ovn.create_cluster_router('lr-cluster')
-            ovn.create_cluster_join_switch('ls-join')
-            ovn.create_cluster_load_balancer('lb-cluster', global_cfg)
-            for i in ctx:
-                worker = ovn.worker_nodes[i]
-                worker.provision(ovn)
-                ports = worker.provision_ports(
-                    ovn, self.config.n_pods_per_node
+    def create_transit_switch(self, cluster):
+        cluster.icnbctl.ts_add()
+
+    def connect_transit_switch(self, cluster):
+        uuid = cluster.nbctl.ls_get_uuid('ts', 10)
+        cluster.ts_switch = LSwitch(
+            name='ts',
+            cidr=cluster.cluster_cfg.ts_net.n4,
+            cidr6=cluster.cluster_cfg.ts_net.n6,
+            uuid=uuid,
+        )
+        rp = cluster.nbctl.lr_port_add(
+            cluster.router,
+            f'lr-cluster{cluster.az}-to-ts',
+            RandMac(),
+            cluster.cluster_cfg.ts_net.forward(cluster.az),
+        )
+        cluster.nbctl.ls_port_add(
+            cluster.ts_switch, f'ts-to-lr-cluster{cluster.az}', rp
+        )
+        cluster.nbctl.lr_port_set_gw_chassis(
+            rp, cluster.worker_nodes[0].container
+        )
+        cluster.worker_nodes[0].vsctl.set_global_external_id(
+            'ovn-is-interconn', 'true'
+        )
+
+    def check_ic_connectivity(self, clusters):
+        ic_cluster = clusters[0]
+        for cluster in clusters:
+            if ic_cluster == cluster:
+                continue
+            for w in cluster.worker_nodes:
+                port = w.lports[0]
+                if port.ip:
+                    ic_cluster.worker_nodes[0].run_ping(
+                        ic_cluster,
+                        ic_cluster.worker_nodes[0].lports[0].name,
+                        port.ip,
+                    )
+                if port.ip6:
+                    ic_cluster.worker_nodes[0].run_ping(
+                        ic_cluster,
+                        ic_cluster.worker_nodes[0].lports[0].name,
+                        port.ip6,
+                    )
+
+    def run(self, clusters, global_cfg):
+        self.create_transit_switch(clusters[0])
+
+        for c, cluster in enumerate(clusters):
+            # create ovn topology
+            with Context(
+                clusters, 'base_cluster_bringup', len(cluster.worker_nodes)
+            ) as ctx:
+                cluster.create_cluster_router(f'lr-cluster{c+1}')
+                cluster.create_cluster_join_switch(f'ls-join{c+1}')
+                cluster.create_cluster_load_balancer(
+                    f'lb-cluster{c+1}', global_cfg
                 )
-                worker.provision_load_balancers(ovn, ports, global_cfg)
-                worker.ping_ports(ovn, ports)
-            ovn.provision_lb_group()
+                self.connect_transit_switch(cluster)
+                for i in ctx:
+                    worker = cluster.worker_nodes[i]
+                    worker.provision(cluster)
+                    ports = worker.provision_ports(
+                        cluster, self.config.n_pods_per_node
+                    )
+                    worker.provision_load_balancers(cluster, ports, global_cfg)
+                    worker.ping_ports(cluster, ports)
+                cluster.provision_lb_group(f'cluster-lb-group{c+1}')
+
+        # check ic connectivity
+        self.check_ic_connectivity(clusters)
46 changes: 30 additions & 16 deletions ovn-tester/cms/ovn_kubernetes/tests/cluster_density.py
@@ -17,8 +17,8 @@
 
 
 class ClusterDensity(ExtCmd):
-    def __init__(self, config, cluster, global_cfg):
-        super().__init__(config, cluster)
+    def __init__(self, config, clusters, global_cfg):
+        super().__init__(config, clusters)
         test_config = config.get('cluster_density', dict())
         self.config = ClusterDensityCfg(
             n_runs=test_config.get('n_runs', 0),
@@ -27,57 +27,71 @@ def __init__(self, config, cluster, global_cfg):
         if self.config.n_startup > self.config.n_runs:
             raise ovn_exceptions.OvnInvalidConfigException()
 
-    def run_iteration(self, ovn, index, global_cfg, passive):
-        ns = Namespace(ovn, f'NS_density_{index}', global_cfg)
+    def run_iteration(self, clusters, index, global_cfg, passive):
+        ns = Namespace(clusters, f'NS_density_{index}', global_cfg)
+        az_index = index % len(clusters)
+        ovn = clusters[az_index]
         # Create DENSITY_N_BUILD_PODS short lived "build" pods.
         if not passive:
             build_ports = ovn.provision_ports(DENSITY_N_BUILD_PODS, passive)
-            ns.add_ports(build_ports)
+            ns.add_ports(build_ports, az_index)
             ovn.ping_ports(build_ports)
 
         # Add DENSITY_N_PODS test pods and provision them as backends
         # to the namespace load balancer.
         ports = ovn.provision_ports(DENSITY_N_PODS, passive)
-        ns.add_ports(ports)
-        ns.create_load_balancer()
+        ns.add_ports(ports, az_index)
+        ns.create_load_balancer(az_index)
         ovn.provision_lb(ns.load_balancer)
         if global_cfg.run_ipv4:
             ns.provision_vips_to_load_balancers(
-                [ports[0:2], ports[2:3], ports[3:4]], 4
+                [ports[0:2], ports[2:3], ports[3:4]],
+                4,
+                az_index,
             )
         if global_cfg.run_ipv6:
             ns.provision_vips_to_load_balancers(
-                [ports[0:2], ports[2:3], ports[3:4]], 6
+                [ports[0:2], ports[2:3], ports[3:4]],
+                6,
+                az_index,
             )
 
         # Ping the test pods and remove the short lived ones.
         if not passive:
             ovn.ping_ports(ports)
-            ns.unprovision_ports(build_ports)
+            ns.unprovision_ports(build_ports, az_index)
         return ns
 
-    def run(self, ovn, global_cfg):
+    def run(self, clusters, global_cfg):
         all_ns = []
-        with Context(ovn, 'cluster_density_startup', brief_report=True) as ctx:
+        with Context(
+            clusters, 'cluster_density_startup', brief_report=True
+        ) as ctx:
             for index in range(self.config.n_startup):
                 all_ns.append(
-                    self.run_iteration(ovn, index, global_cfg, passive=True)
+                    self.run_iteration(
+                        clusters, index, global_cfg, passive=True
+                    )
                 )
 
         with Context(
-            ovn,
+            clusters,
             'cluster_density',
             self.config.n_runs - self.config.n_startup,
             test=self,
         ) as ctx:
             for i in ctx:
                 index = self.config.n_startup + i
                 all_ns.append(
-                    self.run_iteration(ovn, index, global_cfg, passive=False)
+                    self.run_iteration(
+                        clusters, index, global_cfg, passive=False
+                    )
                 )
 
         if not global_cfg.cleanup:
             return
-        with Context(ovn, 'cluster_density_cleanup', brief_report=True) as ctx:
+        with Context(
+            clusters, 'cluster_density_cleanup', brief_report=True
+        ) as ctx:
             for ns in all_ns:
                 ns.unprovision()
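run_iteration now picks its target cluster round-robin via az_index = index % len(clusters), so consecutive namespaces land in consecutive availability zones. A toy illustration with stand-in values, not tester code:

```python
clusters = ['az-1', 'az-2', 'az-3']  # stand-ins for Cluster objects
for index in range(6):
    az_index = index % len(clusters)
    # indices 0..5 map to az-1, az-2, az-3, az-1, az-2, az-3
    print(f'NS_density_{index} -> {clusters[az_index]}')
```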
32 changes: 20 additions & 12 deletions ovn-tester/cms/ovn_kubernetes/tests/density_heavy.py
@@ -23,8 +23,8 @@
 
 
 class DensityHeavy(ExtCmd):
-    def __init__(self, config, cluster, global_cfg):
-        super().__init__(config, cluster)
+    def __init__(self, config, clusters, global_cfg):
+        super().__init__(config, clusters)
         test_config = config.get('density_heavy', dict())
         pods_vip_ratio = test_config.get(
             'pods_vip_ratio', DENSITY_PODS_VIP_RATIO
@@ -53,9 +53,10 @@ def create_lb(self, cluster, name, vip, backends, version):
         )
         self.lb_list.append(load_balancer)
 
-    def run_iteration(self, ovn, ns, index, global_cfg, passive):
+    def run_iteration(self, clusters, ns, index, global_cfg, passive):
+        ovn = clusters[index % len(clusters)]
         ports = ovn.provision_ports(self.config.pods_vip_ratio, passive)
-        ns.add_ports(ports)
+        ns.add_ports(ports, index % len(clusters))
         backends = ports[0:1]
         if global_cfg.run_ipv4:
             name = f'density_heavy_{index}'
@@ -66,30 +67,37 @@ def run_iteration(self, ovn, ns, index, global_cfg, passive):
         if not passive:
             ovn.ping_ports(ports)
 
-    def run(self, ovn, global_cfg):
+    def run(self, clusters, global_cfg):
         if self.config.pods_vip_ratio == 0:
             return
 
-        ns = Namespace(ovn, 'ns_density_heavy', global_cfg)
-        with Context(ovn, 'density_heavy_startup', brief_report=True) as ctx:
+        ns = Namespace(clusters, 'ns_density_heavy', global_cfg)
+        with Context(
+            clusters, 'density_heavy_startup', brief_report=True
+        ) as ctx:
             for i in range(
                 0, self.config.n_startup, self.config.pods_vip_ratio
             ):
-                self.run_iteration(ovn, ns, i, global_cfg, passive=True)
+                self.run_iteration(clusters, ns, i, global_cfg, passive=True)
 
         with Context(
-            ovn,
+            clusters,
             'density_heavy',
             (self.config.n_pods - self.config.n_startup)
             / self.config.pods_vip_ratio,
             test=self,
         ) as ctx:
             for i in ctx:
                 index = i * self.config.pods_vip_ratio + self.config.n_startup
-                self.run_iteration(ovn, ns, index, global_cfg, passive=False)
+                self.run_iteration(
+                    clusters, ns, index, global_cfg, passive=False
+                )
 
         if not global_cfg.cleanup:
             return
-        with Context(ovn, 'density_heavy_cleanup', brief_report=True) as ctx:
-            ovn.unprovision_vips()
+        with Context(
+            clusters, 'density_heavy_cleanup', len(clusters), brief_report=True
+        ) as ctx:
+            for i in ctx:
+                clusters[i].unprovision_vips()
             ns.unprovision()
(Diffs for the remaining 13 changed files are not shown.)
