Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
Ubuntu committed May 24, 2022
2 parents 8909517 + 2f53bd4 commit 95afe70
Show file tree
Hide file tree
Showing 72 changed files with 5,935 additions and 390 deletions.
304 changes: 271 additions & 33 deletions config/main.py

Large diffs are not rendered by default.

60 changes: 58 additions & 2 deletions config/plugins/auto_techsupport.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,50 @@ def AUTO_TECHSUPPORT_GLOBAL_max_core_limit(db, max_core_limit):
exit_with_error(f"Error: {err}", fg="red")


@AUTO_TECHSUPPORT_GLOBAL.command(name="available-mem-threshold")
@click.argument(
    "available-mem-threshold",
    nargs=1,
    required=True,
)
@clicommon.pass_db
def AUTO_TECHSUPPORT_GLOBAL_available_mem_threshold(db, available_mem_threshold):
    """ Memory threshold; 0 to disable techsupport invocation on memory usage threshold crossing.
    """

    # Write the single field into the AUTO_TECHSUPPORT|GLOBAL entry,
    # creating the entry if it does not exist yet.
    try:
        update_entry_validated(
            db.cfgdb,
            "AUTO_TECHSUPPORT",
            "GLOBAL",
            {"available_mem_threshold": available_mem_threshold},
            create_if_not_exists=True,
        )
    except Exception as err:
        exit_with_error(f"Error: {err}", fg="red")


@AUTO_TECHSUPPORT_GLOBAL.command(name="min-available-mem")
@click.argument(
    "min-available-mem",
    nargs=1,
    required=True,
)
@clicommon.pass_db
def AUTO_TECHSUPPORT_GLOBAL_min_available_mem(db, min_available_mem):
    """ Minimum free memory amount in Kb when techsupport will be executed.
    """

    # Persist the field in the AUTO_TECHSUPPORT|GLOBAL entry; the entry is
    # created on first use.
    try:
        update_entry_validated(
            db.cfgdb,
            "AUTO_TECHSUPPORT",
            "GLOBAL",
            {"min_available_mem": min_available_mem},
            create_if_not_exists=True,
        )
    except Exception as err:
        exit_with_error(f"Error: {err}", fg="red")


@AUTO_TECHSUPPORT_GLOBAL.command(name="since")
@click.argument(
"since",
Expand Down Expand Up @@ -271,8 +315,12 @@ def AUTO_TECHSUPPORT_FEATURE():
"--rate-limit-interval",
help="Rate limit interval for the corresponding feature. Configure 0 to explicitly disable",
)
@click.option(
"--available-mem-threshold",
help="Memory threshold; 0 to disable techsupport invocation on memory usage threshold crossing.",
)
@clicommon.pass_db
def AUTO_TECHSUPPORT_FEATURE_add(db, feature_name, state, rate_limit_interval):
def AUTO_TECHSUPPORT_FEATURE_add(db, feature_name, state, rate_limit_interval, available_mem_threshold):
""" Add object in AUTO_TECHSUPPORT_FEATURE. """

table = "AUTO_TECHSUPPORT_FEATURE"
Expand All @@ -282,6 +330,8 @@ def AUTO_TECHSUPPORT_FEATURE_add(db, feature_name, state, rate_limit_interval):
data["state"] = state
if rate_limit_interval is not None:
data["rate_limit_interval"] = rate_limit_interval
if available_mem_threshold is not None:
data["available_mem_threshold"] = available_mem_threshold

try:
add_entry_validated(db.cfgdb, table, key, data)
Expand All @@ -303,8 +353,12 @@ def AUTO_TECHSUPPORT_FEATURE_add(db, feature_name, state, rate_limit_interval):
"--rate-limit-interval",
help="Rate limit interval for the corresponding feature. Configure 0 to explicitly disable",
)
@click.option(
"--available-mem-threshold",
help="Memory threshold; 0 to disable techsupport invocation on memory usage threshold crossing.",
)
@clicommon.pass_db
def AUTO_TECHSUPPORT_FEATURE_update(db, feature_name, state, rate_limit_interval):
def AUTO_TECHSUPPORT_FEATURE_update(db, feature_name, state, rate_limit_interval, available_mem_threshold):
""" Add object in AUTO_TECHSUPPORT_FEATURE. """

table = "AUTO_TECHSUPPORT_FEATURE"
Expand All @@ -314,6 +368,8 @@ def AUTO_TECHSUPPORT_FEATURE_update(db, feature_name, state, rate_limit_interval
data["state"] = state
if rate_limit_interval is not None:
data["rate_limit_interval"] = rate_limit_interval
if available_mem_threshold is not None:
data["available_mem_threshold"] = available_mem_threshold

try:
update_entry_validated(db.cfgdb, table, key, data)
Expand Down
3 changes: 3 additions & 0 deletions doc/Command-Reference.md
Original file line number Diff line number Diff line change
Expand Up @@ -5281,8 +5281,11 @@ If vrf-name is also provided as part of the command, if the vrf is created it wi
default Vlan20
Vrf-red Vlan100
Loopback11
Eth0.100
Vrf-blue Loopback100
Loopback102
Ethernet0.10
PortChannel101
````
### VRF config commands
Expand Down
18 changes: 13 additions & 5 deletions dump/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from tabulate import tabulate
from sonic_py_common import multi_asic
from utilities_common.constants import DEFAULT_NAMESPACE
from swsscommon.swsscommon import ConfigDBConnector
from dump.match_infra import RedisSource, JsonSource, ConnectionPool
from dump import plugins

Expand Down Expand Up @@ -82,7 +83,10 @@ def state(ctx, module, identifier, db, table, key_map, verbose, namespace):
params['namespace'] = namespace
for arg in ids:
params[plugins.dump_modules[module].ARG_NAME] = arg
collected_info[arg] = obj.execute(params)
try:
collected_info[arg] = obj.execute(params)
except ValueError as err:
click.fail(f"Failed to execute plugin: {err}")

if len(db) > 0:
collected_info = filter_out_dbs(db, collected_info)
Expand Down Expand Up @@ -151,7 +155,7 @@ def populate_fv(info, module, namespace):
db_cfg_file = JsonSource()
db_conn = ConnectionPool().initialize_connector(namespace)
for db_name in all_dbs:
if db_name is "CONFIG_FILE":
if db_name == "CONFIG_FILE":
db_cfg_file.connect(plugins.dump_modules[module].CONFIG_FILE, namespace)
else:
db_conn.connect(db_name)
Expand All @@ -164,7 +168,7 @@ def populate_fv(info, module, namespace):
final_info[id][db_name]["keys"] = []
final_info[id][db_name]["tables_not_found"] = info[id][db_name]["tables_not_found"]
for key in info[id][db_name]["keys"]:
if db_name is "CONFIG_FILE":
if db_name == "CONFIG_FILE":
fv = db_cfg_file.get(db_name, key)
else:
fv = db_conn.get_all(db_name, key)
Expand All @@ -174,9 +178,13 @@ def populate_fv(info, module, namespace):


def get_dict_str(key_obj):
conn = ConfigDBConnector()
table = []
for pair in key_obj.items():
table.append(list(pair))
key_obj = conn.raw_to_typed(key_obj)
for field, value in key_obj.items():
if isinstance(value, list):
value = "\n".join(value)
table.append((field, value))
return tabulate(table, headers=["field", "value"], tablefmt="psql")


Expand Down
31 changes: 30 additions & 1 deletion dump/match_helper.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,23 @@
from dump.match_infra import MatchRequest
from dump.helper import handle_multiple_keys_matched_error

# Return dict helper methods

def check_error(ret):
    """ Report whether a match result carries an error.

    Returns a (failed, error_string) tuple; error_string is "" on success.
    """
    err = ret["error"]
    return (True, err) if err else (False, "")

def get_matched_keys(ret):
    """ Return (matched_keys, "") on success, or ([], error_string) on failure. """
    failed, err_str = check_error(ret)
    return ([], err_str) if failed else (ret["keys"], "")

# Port Helper Methods

def fetch_port_oid(match_engine, port_name, ns):
Expand All @@ -27,7 +44,7 @@ def fetch_vlan_oid(match_engine, vlan_name, ns):
vlan_num = int(vlan_name[4:])

# Find the table named "ASIC_STATE:SAI_OBJECT_TYPE_VLAN:*" in which SAI_VLAN_ATTR_VLAN_ID = vlan_num
req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_VLAN", key_pattern="*", field="SAI_VLAN_ATTR_VLAN_ID",
req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_VLAN", key_pattern="*", field="SAI_VLAN_ATTR_VLAN_ID",
value=str(vlan_num), ns=ns)
ret = match_engine.fetch(req)
vlan_oid = ""
Expand Down Expand Up @@ -83,3 +100,15 @@ def fetch_lag_oid(match_engine, lag_name, ns):
lag_oids matched {}".format(lag_name, lag_type_oids), lag_type_oids[-1])
lag_type_oid = lag_type_oids[-1]
return lag_type_oid

# ACL helpers

def fetch_acl_counter_oid(match_engine, acl_table_name, acl_rule_name, ns):
    """
    Fetch ACL counter OID from COUNTERS DB for a particular rule.

    Looks up "<table><sep><rule>" in the ACL_COUNTER_RULE_MAP hash;
    returns None when no counter is mapped for the rule.
    """
    counters_db = match_engine.get_redis_source_adapter()
    counters_db.connect("COUNTERS_DB", ns)
    rule_map = counters_db.hgetall("COUNTERS_DB", "ACL_COUNTER_RULE_MAP")
    sep = counters_db.get_separator("COUNTERS_DB")
    map_key = sep.join([acl_table_name, acl_rule_name])
    return rule_map.get(map_key)
26 changes: 24 additions & 2 deletions dump/match_infra.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,6 +154,10 @@ def hget(self, db, key, field):
def get_separator(self, db):
return ""

    @abstractmethod
    def hgetall(self, db, key):
        """ Return all field/value pairs stored under *key* in *db*.

        Concrete adapters must override; calling the base raises.
        """
        raise NotImplementedError


class RedisSource(SourceAdapter):
""" Concrete Adaptor Class for connecting to Redis Data Sources """
Expand Down Expand Up @@ -182,6 +186,9 @@ def get(self, db, key):
def hget(self, db, key, field):
return self.conn.get(db, key, field)

    def hgetall(self, db, key):
        """ Return the full field/value dict for *key* via the redis connector. """
        return self.conn.get_all(db, key)


class JsonSource(SourceAdapter):
""" Concrete Adaptor Class for connecting to JSON Data Sources """
Expand Down Expand Up @@ -219,6 +226,11 @@ def hget(self, db, key, field):
table, key = key.split(sep, 1)
return self.json_data.get(table, "").get(key, "").get(field, "")

def hgetall(self, db, key):
sep = self.get_separator(db)
table, key = key.split(sep, 1)
return self.json_data.get(table, {}).get(key)


class ConnectionPool:
""" Caches SonicV2Connector objects for effective reuse """
Expand Down Expand Up @@ -250,6 +262,10 @@ def clear(self, namespace=None):
elif namespace in self.cache:
del self.cache[namespace]

def fill(self, ns, conn, connected_to):
""" Update internal cache """
self.cache[ns] = {'conn': conn, 'connected_to': set(connected_to)}


class MatchEngine:
"""
Expand All @@ -267,15 +283,21 @@ def __init__(self, pool=None):
def clear_cache(self, ns):
self.conn_pool(ns)

    def get_redis_source_adapter(self):
        """ Return a RedisSource backed by this engine's connection pool. """
        return RedisSource(self.conn_pool)

    def get_json_source_adapter(self):
        """ Return a fresh JsonSource adapter. """
        return JsonSource()

def __get_source_adapter(self, req):
src = None
d_src = ""
if req.db:
d_src = req.db
src = RedisSource(self.conn_pool)
src = self.get_redis_source_adapter()
else:
d_src = req.file
src = JsonSource()
src = self.get_json_source_adapter()
return d_src, src

def __create_template(self):
Expand Down
84 changes: 84 additions & 0 deletions dump/plugins/acl_rule.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
from dump.helper import create_template_dict
from dump.match_infra import MatchRequest
from swsscommon.swsscommon import SonicDBConfig

from dump.match_helper import fetch_acl_counter_oid
from .executor import Executor


CFG_DB_SEPARATOR = SonicDBConfig.getSeparator("CONFIG_DB")
ASIC_DB_SEPARATOR = SonicDBConfig.getSeparator("ASIC_DB")


class Acl_Rule(Executor):
    """
    Debug Dump Plugin for ACL Rule Module
    """
    ARG_NAME = "acl_rule_name"

    def __init__(self, match_engine=None):
        super().__init__(match_engine)

    def get_all_args(self, ns=""):
        """ Return every ACL rule identifier ("TABLE<sep>RULE") found in CONFIG_DB. """
        req = MatchRequest(db="CONFIG_DB", table="ACL_RULE", key_pattern="*", ns=ns)
        ret = self.match_engine.fetch(req)
        acl_rules = ret["keys"]
        # Drop the leading "ACL_RULE" table prefix, keep "TABLE<sep>RULE".
        return [key.split(CFG_DB_SEPARATOR, 1)[-1] for key in acl_rules]

    def execute(self, params):
        """ Collect CONFIG_DB and ASIC_DB state for one ACL rule.

        Raises:
            ValueError: if the argument is not of the form "TABLE<sep>RULE".
        """
        self.ret_temp = create_template_dict(dbs=["CONFIG_DB", "ASIC_DB"])

        try:
            acl_table_name, acl_rule_name = params[self.ARG_NAME].split(CFG_DB_SEPARATOR, 1)
        except ValueError:
            raise ValueError(f"Invalid rule name passed {params[self.ARG_NAME]}")

        self.ns = params["namespace"]
        self.init_acl_rule_config_info(acl_table_name, acl_rule_name)
        self.init_acl_rule_asic_info(acl_table_name, acl_rule_name)
        return self.ret_temp

    def init_acl_rule_config_info(self, acl_table_name, acl_rule_name):
        """ Record the CONFIG_DB ACL_RULE entry for the rule. """
        req = MatchRequest(db="CONFIG_DB", table="ACL_RULE",
                           key_pattern=CFG_DB_SEPARATOR.join([acl_table_name, acl_rule_name]), ns=self.ns)
        ret = self.match_engine.fetch(req)
        self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"])

    def init_acl_rule_asic_info(self, acl_table_name, acl_rule_name):
        """ Record the ASIC_DB counter, ACL entry and range objects backing the rule. """
        counter_oid = fetch_acl_counter_oid(self.match_engine, acl_table_name, acl_rule_name, self.ns)
        if not counter_oid:
            # Rule has no counter mapped yet, so nothing is realized in ASIC_DB.
            return

        # Resolve the ACL table OID via the counter object.
        req = MatchRequest(db="ASIC_DB", table=ASIC_DB_SEPARATOR.join(["ASIC_STATE", "SAI_OBJECT_TYPE_ACL_COUNTER"]),
                           key_pattern=counter_oid, return_fields=["SAI_ACL_COUNTER_ATTR_TABLE_ID"], ns=self.ns)
        ret = self.match_engine.fetch(req)
        self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"])

        return_values = ret["return_values"]
        counter_object = return_values.get(ASIC_DB_SEPARATOR.join(["ASIC_STATE", "SAI_OBJECT_TYPE_ACL_COUNTER", counter_oid]), {})
        table_oid = counter_object.get("SAI_ACL_COUNTER_ATTR_TABLE_ID")
        if not table_oid:
            raise Exception("Invalid counter object without table OID in ASIC_DB")

        # Find the ACL entries that belong to that table and collect any range attrs.
        req = MatchRequest(db="ASIC_DB", table=ASIC_DB_SEPARATOR.join(["ASIC_STATE", "SAI_OBJECT_TYPE_ACL_ENTRY"]), key_pattern="*",
                           field="SAI_ACL_ENTRY_ATTR_TABLE_ID", value=table_oid,
                           return_fields=["SAI_ACL_ENTRY_ATTR_FIELD_ACL_RANGE_TYPE"], ns=self.ns)
        ret = self.match_engine.fetch(req)
        self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"])

        range_oids = set()
        for entry in ret["return_values"].values():
            range_attr_value = entry.get("SAI_ACL_ENTRY_ATTR_FIELD_ACL_RANGE_TYPE")
            if not range_attr_value:
                continue
            # Field format: "<count><sep><oid1>,<oid2>,..."; split off the count prefix.
            ranges_attr_value = range_attr_value.split(ASIC_DB_SEPARATOR, 1)
            # BUG FIX: validate the length of the *split result*, not of the raw
            # string (the original checked len(range_attr_value), a string length,
            # so a malformed field without a separator was never rejected).
            if len(ranges_attr_value) < 2:
                raise Exception("Invalid SAI_ACL_ENTRY_ATTR_FIELD_ACL_RANGE_TYPE field format")
            for oid in ranges_attr_value[1].split(','):
                range_oids.add(oid)

        # Dump each referenced ACL range object.
        for range_oid in range_oids:
            req = MatchRequest(db="ASIC_DB", table=ASIC_DB_SEPARATOR.join(["ASIC_STATE", "SAI_OBJECT_TYPE_ACL_RANGE"]),
                               key_pattern=range_oid, ns=self.ns)
            ret = self.match_engine.fetch(req)
            self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"])
Loading

0 comments on commit 95afe70

Please sign in to comment.