
Commit

Fix post-processing bug that was destroying network stats (incl. distributed stats) for outermost masked levels.
angshuman-parashar committed Sep 19, 2023
1 parent 5884319 commit 1ddd52f
Showing 1 changed file with 41 additions and 5 deletions.
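
The heart of the fix in ProcessOuterMaskedLevels() (second hunk below) is to save the distributed network stats before resetting an outer masked level and restore them afterwards, since that traffic is carried on behalf of the child level. A minimal standalone sketch of that save/Reset/restore pattern, using placeholder types rather than the real DataMovementInfo:

#include <cstdint>
#include <iostream>

// Placeholder stand-ins for the real tiling structures (assumptions for this
// sketch, not the actual timeloop types).
struct AccessStats
{
  std::uint64_t accesses = 0;
  double hops = 0;
};

struct LevelInfo
{
  std::uint64_t content_accesses = 0;
  AccessStats access_stats;
  AccessStats distributed_access_stats;

  // Reset() wipes *all* stats, including the distributed ones; this is what
  // was destroying distributed network stats for outer masked levels.
  void Reset() { *this = LevelInfo(); }
};

// The pattern the commit applies: blow away everything except the distributed
// network stats, which belong to traffic moved on behalf of the child level.
void ResetExceptDistributed(LevelInfo& level)
{
  auto saved_distributed_stats = level.distributed_access_stats;
  level.Reset();
  level.distributed_access_stats = saved_distributed_stats;
}

int main()
{
  LevelInfo level;
  level.content_accesses = 100;
  level.access_stats.accesses = 100;
  level.access_stats.hops = 4.0;
  level.distributed_access_stats.accesses = 40;
  level.distributed_access_stats.hops = 2.5;

  ResetExceptDistributed(level);

  // Prints "0 40": regular stats are gone, distributed stats survive.
  std::cout << level.content_accesses << " "
            << level.distributed_access_stats.accesses << std::endl;
}
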
46 changes: 41 additions & 5 deletions src/loop-analysis/tiling.cpp
@@ -179,6 +179,12 @@ void MaskTiles(std::vector<DataMovementInfo>& tile_nest, std::bitset<MaxTilingLe
tile_nest[cur].content_accesses = 0;
tile_nest[cur].parent_access_share = 0;

// There is some ugliness here. We have a follow-up call to
// ProcessOuterMaskedLevels() which will destroy even more stats from
// this level, so the above resets are perhaps not needed. Make sure
// there aren't any intervening calls between here and POML() and then
// delete most of the above lines *except* the trigger that POML needs.

// Break out, we are done since there is no other unmasked level.
break;
}
@@ -284,8 +290,12 @@ void ProcessOuterMaskedLevels(std::vector<DataMovementInfo>& tile_nest, std::bit
// Work on all outermost masked levels until we find an unmasked level.
if (!mask[cur])
{
// Blow up *all* stats (including network stats).
// Blow up all stats (including network stats), *except* distributed
// network stats (because distributed network traffic is transferred
// on behalf of the child level).
auto saved_distributed_stats = tile_nest[cur].distributed_access_stats;
tile_nest[cur].Reset();
tile_nest[cur].distributed_access_stats = saved_distributed_stats;
}
else
{
@@ -845,6 +855,33 @@ ComputeNest CollapseComputeNest(analysis::CompoundComputeNest& tiles, int num_ti
return solution;
}

void PrintTiles(std::vector<DataMovementInfo>& tile_nest)
{
  int num_tiling_levels = tile_nest.size();

  for (int cur = 0; cur < num_tiling_levels; cur++)
  {
    auto& tile = tile_nest[cur];

    std::cout << "Level " << cur << std::endl;
    std::cout << " size = " << tile.size << std::endl;
    std::cout << " ca = " << tile.content_accesses << std::endl;
    std::cout << " access[" << tile.access_stats.stats.size() << "]:\n";
    for (auto& x: tile.access_stats.stats)
    {
      if (x.second.accesses > 0)
        std::cout << " [" << x.first.first << "," << x.first.second << "]: accesses = "
                  << x.second.accesses << " hops = " << x.second.hops << std::endl;
    }
    std::cout << " dist access[" << tile.distributed_access_stats.stats.size() << "]:\n";
    for (auto& x: tile.distributed_access_stats.stats)
    {
      if (x.second.accesses > 0)
        std::cout << " [" << x.first.first << "," << x.first.second << "]: accesses = "
                  << x.second.accesses << " hops = " << x.second.hops << std::endl;
    }
  }
}

// Collapse tiles into a given number of levels.
// Input and output are both arrays of tile nests,
@@ -945,6 +982,9 @@ CompoundDataMovementNest CollapseDataMovementNest(analysis::CompoundDataMovement
// Calculate share of parent accesses (used to compute fills).
ComputeParentAccessShare(solution[pv]);

// Perform distributed-multicast if supported.
DistributeTiles(solution[pv], distribution_supported[pv]);

// Mask each solution according to the provided bit mask.
MaskTiles(solution[pv], tile_mask[pv]);

@@ -954,9 +994,6 @@ CompoundDataMovementNest CollapseDataMovementNest(analysis::CompoundDataMovement
// Set backing storage fill to zero place holder
ResetBackingStorageFillsPlaceHolder(solution[pv]);

// Perform distributed-multicast if supported.
DistributeTiles(solution[pv], distribution_supported[pv]);

// Calculate the extra accesses and fills due to link transfers
ComputePeerAccesses(solution[pv]);

@@ -969,7 +1006,6 @@ CompoundDataMovementNest CollapseDataMovementNest(analysis::CompoundDataMovement
// Find the parent and child levels for later compression/decompression logic
SetParentLevel(solution[pv]);
SetChildLevel(solution[pv]);

}

// flip the workload tensor set flag if necessary
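For orientation, here is the post-processing order inside the per-dataspace loop of CollapseDataMovementNest() as it stands after this commit, limited to the calls visible in the hunks above (anything hidden in the collapsed regions is elided). The trailing PrintTiles() call is a hypothetical debugging hook, not something this commit adds:

// Excerpt-style sketch; solution, pv, tile_mask and distribution_supported
// are the locals from CollapseDataMovementNest() shown above.
ComputeParentAccessShare(solution[pv]);

// Distributed-multicast now happens *before* masking (it previously ran
// after ResetBackingStorageFillsPlaceHolder()).
DistributeTiles(solution[pv], distribution_supported[pv]);

MaskTiles(solution[pv], tile_mask[pv]);

// ... (lines collapsed in the view above are omitted here too) ...
ResetBackingStorageFillsPlaceHolder(solution[pv]);
ComputePeerAccesses(solution[pv]);

// ... (more collapsed lines) ...
SetParentLevel(solution[pv]);
SetChildLevel(solution[pv]);

// Hypothetical debug hook (not part of this commit):
// PrintTiles(solution[pv]);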
