Skip to content

Commit

Permalink
Merge remote-tracking branch 'public/master' into ss-reboot
Browse files Browse the repository at this point in the history
  • Loading branch information
vvolam committed Jan 8, 2025
2 parents dc504ea + 0e327c5 commit e878277
Show file tree
Hide file tree
Showing 6 changed files with 130 additions and 26 deletions.
55 changes: 35 additions & 20 deletions config/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -1322,6 +1322,18 @@ def flush_configdb(namespace=DEFAULT_NAMESPACE):
return client, config_db


def delete_transceiver_tables():
    """Remove all TRANSCEIVER_* entries from STATE_DB.

    Clears cached transceiver data (info, status, performance monitoring,
    firmware info, DOM sensor and DOM threshold tables) so they can be
    repopulated after a configuration reload.
    """
    transceiver_tables = (
        "TRANSCEIVER_INFO",
        "TRANSCEIVER_STATUS",
        "TRANSCEIVER_PM",
        "TRANSCEIVER_FIRMWARE_INFO",
        "TRANSCEIVER_DOM_SENSOR",
        "TRANSCEIVER_DOM_THRESHOLD",
    )

    # Drop every key of each table from STATE_DB, e.g. "TRANSCEIVER_INFO|*".
    db = SonicV2Connector(use_unix_socket_path=True)
    db.connect(db.STATE_DB, False)
    for table_name in transceiver_tables:
        db.delete_all_by_pattern(db.STATE_DB, table_name + "|*")


def migrate_db_to_lastest(namespace=DEFAULT_NAMESPACE):
# Migrate DB contents to latest version
db_migrator = '/usr/local/bin/db_migrator.py'
Expand Down Expand Up @@ -1374,17 +1386,21 @@ def multiasic_write_to_db(filename, load_sysinfo):


def config_file_yang_validation(filename):
    """Validate a config file against the SONiC YANG models.

    Loads *filename* as JSON and runs YANG data-tree validation for the
    host namespace and, on multi-ASIC platforms, for every ASIC namespace
    as well.

    Raises:
        click.Abort: if any namespace's config fails YANG validation.
    """
    # NOTE: the pasted diff interleaved the pre-change single-namespace
    # try-block with the new per-namespace loop; this is the coherent
    # post-change version.
    config = read_json_file(filename)
    sy = sonic_yang.SonicYang(YANG_DIR)
    sy.loadYangModel()

    asic_list = [HOST_NAMESPACE]
    if multi_asic.is_multi_asic():
        asic_list.extend(multi_asic.get_namespace_list())
    for scope in asic_list:
        # On multi-ASIC platforms the file is keyed by namespace; on
        # single-ASIC the whole file is the host configuration.
        config_to_check = config.get(scope) if multi_asic.is_multi_asic() else config
        try:
            sy.loadData(configdbJson=config_to_check)
            sy.validate_data_tree()
        except sonic_yang.SonicYangException as e:
            click.secho("{} fails YANG validation! Error: {}".format(filename, str(e)),
                        fg='magenta')
            raise click.Abort()

# This is our main entrypoint - the main 'config' command
@click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS)
Expand Down Expand Up @@ -1901,6 +1917,7 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form
cfg_hwsku = output.strip()

client, config_db = flush_configdb(namespace)
delete_transceiver_tables()

if load_sysinfo:
if namespace is DEFAULT_NAMESPACE:
Expand Down Expand Up @@ -2019,23 +2036,21 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config,
fg='magenta')
raise click.Abort()

config_to_check = read_json_file(golden_config_path)
if multi_asic.is_multi_asic():
# Multi-ASIC has not been 100% fully validated, so pass here.
pass
else:
config_file_yang_validation(golden_config_path)
config_file_yang_validation(golden_config_path)

config_to_check = read_json_file(golden_config_path)
# Dependency check golden config json
asic_list = [HOST_NAMESPACE]
if multi_asic.is_multi_asic():
host_config = config_to_check.get('localhost', {})
else:
host_config = config_to_check
table_hard_dependency_check(host_config)
asic_list.extend(multi_asic.get_namespace_list())
for scope in asic_list:
host_config = config_to_check.get(scope) if multi_asic.is_multi_asic() else config_to_check
table_hard_dependency_check(host_config)

#Stop services before config push
# Stop services before config push
if not no_service_restart:
log.log_notice("'load_minigraph' stopping services...")
delete_transceiver_tables()
_stop_services()

# For Single Asic platform the namespace list has the empty string
Expand Down
68 changes: 68 additions & 0 deletions scripts/generate_dump
Original file line number Diff line number Diff line change
Expand Up @@ -874,6 +874,42 @@ save_proc() {
chmod ugo+rw -R $DUMPDIR/$BASE/proc
}

###############################################################################
# Given list of sys files, saves sys files to tar.
# Globals:
# V
# TARDIR
# MKDIR
# CP
# DUMPDIR
# TAR
# RM
# BASE
# TARFILE
# NOOP
# Arguments:
# *sysfiles: variable-length list of sys file paths to save
# Returns:
# None
###############################################################################
save_sys() {
    # Abort-with-context on any command failure inside this function.
    trap 'handle_error $? $LINENO' ERR
    # All positional arguments are treated as a whitespace-separated
    # list of sys file paths to collect.
    local sysfiles="$@"
    $MKDIR $V -p $TARDIR/sys
    for f in $sysfiles
    do
        if $NOOP; then
            # Dry-run mode: only print the copy command that would run.
            if [ -e $f ]; then
                echo "$CP $V -r $f $TARDIR/sys"
            fi
        else
            # Copy the sys entry; if it does not exist, record a note.
            # NOTE(review): the note is redirected to $TARDIR/$f where $f is
            # an absolute path (e.g. /sys/...), so the parent directory under
            # $TARDIR may not exist and the redirect can fail -- confirm intent.
            ( [ -e $f ] && $CP $V -r $f $TARDIR/sys ) || echo "$f not found" > $TARDIR/$f
        fi
    done

    # Make the collected files readable/writable by all users in the dump.
    chmod ugo+rw -R $DUMPDIR/$BASE/sys
}

###############################################################################
# Dump io stats for processes
# Globals:
Expand Down Expand Up @@ -1469,6 +1505,7 @@ collect_marvell_prestera() {
###############################################################################
collect_broadcom() {
trap 'handle_error $? $LINENO' ERR
local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m"
local platform=$(show platform summary --json | python -c 'import sys, json; \
print(json.load(sys.stdin)["platform"])')
local hwsku=$(show platform summary --json | python -c 'import sys, json; \
Expand Down Expand Up @@ -1597,6 +1634,25 @@ collect_broadcom() {

copy_from_masic_docker "syncd" "/var/log/diagrun.log" "/var/log/diagrun.log"
copy_from_masic_docker "syncd" "/var/log/bcm_diag_post" "/var/log/bcm_diag_post"

# run 'hw-management-generate-dump.sh' script and save the result file
HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh
if [ -f "$HW_DUMP_FILE" ]; then
${CMD_PREFIX}${timeout_cmd} /usr/bin/hw-management-generate-dump.sh $ALLOW_PROCESS_STOP
ret=$?
if [ $ret -ne 0 ]; then
if [ $ret -eq $TIMEOUT_EXIT_CODE ]; then
echo "hw-management dump timedout after ${TIMEOUT_MIN} minutes."
else
echo "hw-management dump failed ..."
fi
else
save_file "/tmp/hw-mgmt-dump*" "hw-mgmt" false
rm -f /tmp/hw-mgmt-dump*
fi
else
echo "HW Mgmt dump script $HW_DUMP_FILE does not exist"
fi
}

###############################################################################
Expand Down Expand Up @@ -1805,6 +1861,12 @@ save_log_files() {
file_list="${file_list} ${file_list_2}"
fi

# save log files creation info
files_ts_info=/tmp/files.timestamp.info
ls --time-style='+%d-%m-%Y %H:%M:%S' -last /var/log/* > $files_ts_info
save_file $files_ts_info "log" true
rm -f $files_ts_info

# gzip up all log files individually before placing them in the incremental tarball
for file in $file_list; do
dest_dir="log"
Expand Down Expand Up @@ -2071,6 +2133,12 @@ main() {
echo "[ Capture Proc State ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO
wait

# capture /sys info - include acpi info
save_sys /sys/firmware/acpi/tables &
end_t2=$(date +%s%3N)
echo "[ Capture Sys info ] : $(($end_t2-$end_t)) msec" >> $TECHSUPPORT_TIME_INFO
wait

# Save all the processes within each docker
save_cmd "show services" services.summary &

Expand Down
11 changes: 8 additions & 3 deletions scripts/ipintutil
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,14 @@ def get_bgp_peer():
data = config_db.get_table('BGP_NEIGHBOR')

for neighbor_ip in data.keys():
local_addr = data[neighbor_ip]['local_addr']
neighbor_name = data[neighbor_ip]['name']
bgp_peer.setdefault(local_addr, [neighbor_name, neighbor_ip])
# The data collected here will only work for manually defined neighbors
# so we need to ignore errors when using BGP Unnumbered.
try:
local_addr = data[neighbor_ip]['local_addr']
neighbor_name = data[neighbor_ip]['name']
bgp_peer.setdefault(local_addr, [neighbor_name, neighbor_ip])
except KeyError:
pass
return bgp_peer


Expand Down
1 change: 1 addition & 0 deletions scripts/portstat
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ try:
import sonic_py_common
from swsscommon.swsscommon import SonicV2Connector
sonic_py_common.device_info.is_supervisor = mock.MagicMock(return_value=True)
sonic_py_common.device_info.is_voq_chassis = mock.MagicMock(return_value=True)
SonicV2Connector.delete_all_by_pattern = mock.MagicMock()
if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic":
import mock_tables.mock_multi_asic
Expand Down
8 changes: 8 additions & 0 deletions tests/mock_tables/config_db.json
Original file line number Diff line number Diff line change
Expand Up @@ -2059,6 +2059,14 @@
"asn": "65200",
"keepalive": "3"
},
"BGP_NEIGHBOR|Vlan100": {
"rrclient": "0",
"peer_type": "external",
"nhopself": "0",
"admin_status": "up",
"holdtime": "10",
"keepalive": "3"
},
"SCHEDULER|scheduler.0": {
"type": "DWRR",
"weight": "14"
Expand Down
13 changes: 10 additions & 3 deletions utilities_common/portstat.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,8 @@
class Portstat(object):
def __init__(self, namespace, display_option):
self.db = None
self.namespace = namespace
self.display_option = display_option
self.multi_asic = multi_asic_util.MultiAsic(display_option, namespace)
if device_info.is_supervisor():
self.db = SonicV2Connector(use_unix_socket_path=False)
Expand All @@ -132,7 +134,8 @@ def get_cnstat_dict(self):
self.cnstat_dict['time'] = datetime.datetime.now()
self.ratestat_dict = OrderedDict()
if device_info.is_supervisor():
self.collect_stat_from_lc()
if device_info.is_voq_chassis() or (self.namespace is None and self.display_option != 'all'):
self.collect_stat_from_lc()
else:
self.collect_stat()
return self.cnstat_dict, self.ratestat_dict
Expand Down Expand Up @@ -405,7 +408,9 @@ def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_al
print(table_as_json(table, header))
else:
print(tabulate(table, header, tablefmt='simple', stralign='right'))
if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json:
if device_info.is_voq_chassis():
return
elif (multi_asic.is_multi_asic() or device_info.is_packet_chassis()) and not use_json:
print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n")

def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list):
Expand Down Expand Up @@ -668,5 +673,7 @@ def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict,
print(table_as_json(table, header))
else:
print(tabulate(table, header, tablefmt='simple', stralign='right'))
if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json:
if device_info.is_voq_chassis():
return
elif (multi_asic.is_multi_asic() or device_info.is_packet_chassis()) and not use_json:
print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n")

0 comments on commit e878277

Please sign in to comment.