diff --git a/orchagent/dash/dashorch.h b/orchagent/dash/dashorch.h
index df2048e3244..d0b456f0c24 100644
--- a/orchagent/dash/dashorch.h
+++ b/orchagent/dash/dashorch.h
@@ -14,6 +14,7 @@
 #include "macaddress.h"
 #include "timer.h"
 #include "dashorch.h"
+#include "saihelper.h"
 
 struct ApplianceEntry
 {
diff --git a/orchagent/dash/dashvnetorch.cpp b/orchagent/dash/dashvnetorch.cpp
index d293756f7af..e9fc357f265 100644
--- a/orchagent/dash/dashvnetorch.cpp
+++ b/orchagent/dash/dashvnetorch.cpp
@@ -17,6 +17,7 @@
 #include "saiextensions.h"
 #include "swssnet.h"
 #include "tokenize.h"
+#include "dashorch.h"
 
 using namespace std;
 using namespace swss;
diff --git a/orchagent/main.cpp b/orchagent/main.cpp
index 9afa8ec9e92..cad1a947794 100644
--- a/orchagent/main.cpp
+++ b/orchagent/main.cpp
@@ -716,8 +716,9 @@ int main(int argc, char **argv)
         if (gMySwitchType == "voq")
         {
             orchDaemon->setFabricEnabled(true);
-            orchDaemon->setFabricPortStatEnabled(true);
-            orchDaemon->setFabricQueueStatEnabled(true);
+            // SAI doesn't fully support these counters for non-fabric ASICs
+            orchDaemon->setFabricPortStatEnabled(false);
+            orchDaemon->setFabricQueueStatEnabled(false);
         }
     }
     else
diff --git a/tests/mock_tests/test_failure_handling.cpp b/tests/mock_tests/test_failure_handling.cpp
index 7381f4015ee..219b7f12578 100644
--- a/tests/mock_tests/test_failure_handling.cpp
+++ b/tests/mock_tests/test_failure_handling.cpp
@@ -52,7 +52,6 @@ namespace saifailure_test
         _hook_sai_switch_api();
 
         _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
-        _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 
         *_sai_syncd_notifications_count = 0;
diff --git a/tests/test_mux.py b/tests/test_mux.py
index 54340808eac..deaa3bc1636 100644
--- a/tests/test_mux.py
+++ b/tests/test_mux.py
@@ -897,6 +897,80 @@ def create_and_test_NH_routes(self, appdb, asicdb, dvs, dvs_route, mac):
         self.del_neighbor(dvs, neigh_ip)
         self.del_neighbor(dvs, neigh_ipv6)
 
+    def create_and_test_NH_routes(self, appdb, asicdb, dvs, dvs_route, mac):
+        '''
+        Tests case where neighbor is removed in standby and added in active with route
+        '''
+        nh_route = "2.2.2.0/24"
+        nh_route_ipv6 = "2023::/64"
+        neigh_ip = self.SERV1_IPV4
+        neigh_ipv6 = self.SERV1_IPV6
+        apdb = dvs.get_app_db()
+
+        # Setup
+        self.set_mux_state(appdb, "Ethernet0", "active")
+        self.add_neighbor(dvs, neigh_ip, mac)
+        self.add_neighbor(dvs, neigh_ipv6, mac)
+        dvs.runcmd(
+            "vtysh -c \"configure terminal\" -c \"ip route " + nh_route +
+            " " + neigh_ip + "\""
+        )
+        dvs.runcmd(
+            "vtysh -c \"configure terminal\" -c \"ipv6 route " + nh_route_ipv6 +
+            " " + neigh_ipv6 + "\""
+        )
+        apdb.wait_for_entry("ROUTE_TABLE", nh_route)
+        apdb.wait_for_entry("ROUTE_TABLE", nh_route_ipv6)
+
+        rtkeys = dvs_route.check_asicdb_route_entries([nh_route])
+        rtkeys_ipv6 = dvs_route.check_asicdb_route_entries([nh_route_ipv6])
+        self.check_nexthop_in_asic_db(asicdb, rtkeys[0])
+        self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0])
+
+        # Set state to standby and delete neighbor
+        self.set_mux_state(appdb, "Ethernet0", "standby")
+        self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True)
+        self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True)
+
+        self.del_neighbor(dvs, neigh_ip)
+        self.del_neighbor(dvs, neigh_ipv6)
+        apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ip)
+        apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ipv6)
+        asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ip)
+        asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ipv6)
+
+        self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True)
+        self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True)
+
+        # Set state to active, learn neighbor again
+        self.set_mux_state(appdb, "Ethernet0", "active")
+
+        self.add_neighbor(dvs, neigh_ip, mac)
+        self.add_neighbor(dvs, neigh_ipv6, mac)
+        self.check_neigh_in_asic_db(asicdb, neigh_ip)
+        self.check_neigh_in_asic_db(asicdb, neigh_ipv6)
+
+        self.check_nexthop_in_asic_db(asicdb, rtkeys[0])
+        self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0])
+        dvs.runcmd(
+            "ip neigh flush " + neigh_ip
+        )
+        dvs.runcmd(
+            "ip neigh flush " + neigh_ipv6
+        )
+
+        # Cleanup
+        dvs.runcmd(
+            "vtysh -c \"configure terminal\" -c \"no ip route " + nh_route +
+            " " + neigh_ip + "\""
+        )
+        dvs.runcmd(
+            "vtysh -c \"configure terminal\" -c \"no ipv6 route " + nh_route_ipv6 +
+            " " + neigh_ipv6 + "\""
+        )
+        self.del_neighbor(dvs, neigh_ip)
+        self.del_neighbor(dvs, neigh_ipv6)
+
     def get_expected_sai_qualifiers(self, portlist, dvs_acl):
         expected_sai_qualifiers = {
             "SAI_ACL_ENTRY_ATTR_PRIORITY": self.ACL_PRIORITY,
diff --git a/tests/test_vnet.py b/tests/test_vnet.py
index 3b1ef6efd90..e6226c4d421 100644
--- a/tests/test_vnet.py
+++ b/tests/test_vnet.py
@@ -3428,6 +3428,37 @@ def test_vnet_orch_24(self, dvs, testlog):
         # delete vxlan tunnel
         delete_vxlan_tunnel(dvs, tunnel_name)
 
+    '''
+    Test 18 - Test for vxlan custom monitoring config.
+    '''
+    def test_vnet_orch_18(self, dvs, testlog):
+        vnet_obj = self.get_vnet_obj()
+
+        tunnel_name = 'tunnel_18'
+
+        vnet_obj.fetch_exist_entries(dvs)
+
+        create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9')
+        create_vnet_entry(dvs, 'Vnet18', tunnel_name, '10009', "", overlay_dmac="22:33:33:44:44:66")
+
+        vnet_obj.check_vnet_entry(dvs, 'Vnet18')
+        vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet18', '10009')
+
+        vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9')
+
+        vnet_obj.fetch_exist_entries(dvs)
+        create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet18', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3', primary='9.0.0.1', monitoring='custom', adv_prefix='100.100.1.1/27')
+
+        vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.1", "vxlan", "22:33:33:44:44:66")
+        vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.2", "vxlan", "22:33:33:44:44:66")
+        vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.3", "vxlan", "22:33:33:44:44:66")
+
+        delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet18')
+
+        vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1")
+        vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2")
+        vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3")
+
 # Add dummy always-pass test at end as workaround
 # for the issue where a flaky failure on the final test invokes module tear-down before retrying
 def test_nonflaky_dummy():
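
Reviewer note: test_vnet_orch_18 leans on the check_custom_monitor_app_db / check_custom_monitor_deleted helpers to verify that each endpoint gets a custom-monitoring entry in APP_DB. For readers unfamiliar with those helpers, below is a minimal sketch of how such a verification could be written directly against APP_DB using swsscommon. The table name VNET_MONITOR_TABLE, the "endpoint:prefix" key layout, and the packet_type / overlay_dmac field names are assumptions for illustration, not necessarily the exact implementation in tests/test_vnet.py.

# Hedged sketch (assumptions noted above): verify that a custom-monitoring
# entry for (endpoint, prefix) exists in APP_DB with the expected fields.
from swsscommon import swsscommon

def check_custom_monitor_entry(dvs, prefix, endpoint, packet_type, overlay_dmac):
    # Connect to APP_DB through the DVS redis socket.
    app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
    tbl = swsscommon.Table(app_db, "VNET_MONITOR_TABLE")  # assumed table name
    # Assumed key layout: "<endpoint>:<prefix>".
    status, fvs = tbl.get(endpoint + ":" + prefix)
    assert status, "no custom monitor entry for %s:%s" % (endpoint, prefix)
    fields = dict(fvs)
    assert fields.get("packet_type") == packet_type
    assert fields.get("overlay_dmac") == overlay_dmac

A deleted-entry check would be the mirror image: poll tbl.get() for the same key until status comes back False, with a bounded timeout so the test fails fast instead of hanging when orchagent never removes the entry.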