From 16291d07038959c5608f9cd923c76ba01c07e7aa Mon Sep 17 00:00:00 2001
From: Doug Binks
Date: Sat, 27 Feb 2021 14:31:08 +0000
Subject: [PATCH] Fix and test for #60 - adding a large number of tasksets
 with m_SetSize > square of number of threads

---
 example/TestAll.cpp   | 48 +++++++++++++++++++++++++++++++++++++++++++
 src/TaskScheduler.cpp |  3 ++-
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/example/TestAll.cpp b/example/TestAll.cpp
index 22cb93bd..f96903f2 100644
--- a/example/TestAll.cpp
+++ b/example/TestAll.cpp
@@ -218,6 +218,54 @@ int main(int argc, const char * argv[])
     serialTask.ExecuteRange( range, 0 );
     sumSerial = serialTask.m_pPartialSums[0].count;
+
+
+    RunTestFunction(
+        "Test Lots of TaskSets",
+        [&]()->bool
+        {
+            g_TS.Initialize( baseConfig );
+
+            static constexpr uint32_t TASK_RANGE = 65*65;
+            static constexpr uint32_t TASK_COUNT = 50;
+
+
+            struct TaskSet : public enki::ITaskSet
+            {
+                TaskSet() : enki::ITaskSet(TASK_RANGE) {};
+                virtual void ExecuteRange( TaskSetPartition range_, uint32_t threadnum_ ) override
+                {
+                    if( range_.start >= TASK_RANGE && range_.end > TASK_RANGE )
+                    {
+                        countErrors.fetch_add(1);
+                    }
+                }
+
+                std::atomic<int32_t> countErrors{ 0 };
+            };
+
+            TaskSet tasks[TASK_COUNT];
+
+            for( uint32_t i = 0; i < TASK_COUNT; ++i )
+            {
+                g_TS.AddTaskSetToPipe( &tasks[i] );
+            }
+
+            g_TS.WaitforAll();
+
+            bool bSuccess = true;
+            for( uint32_t i = 0; i < TASK_COUNT; ++i )
+            {
+                if( tasks[i].countErrors.load( std::memory_order_relaxed ) > 0 )
+                {
+                    bSuccess = false;
+                    break;
+                }
+            }
+
+            return bSuccess;
+        }
+        );

     RunTestFunction(
         "Parallel Reduction Sum",
         [&]()->bool
diff --git a/src/TaskScheduler.cpp b/src/TaskScheduler.cpp
index 88555141..39436506 100644
--- a/src/TaskScheduler.cpp
+++ b/src/TaskScheduler.cpp
@@ -653,9 +653,10 @@ void TaskScheduler::SplitAndAddTask( uint32_t threadNum_, SubTaskSet subTask_, u
         }
         numAdded = 0;
         // alter range to run the appropriate fraction
-        if( taskToAdd.pTask->m_RangeToRun < rangeToSplit_ )
+        if( taskToAdd.pTask->m_RangeToRun < taskToAdd.partition.end - taskToAdd.partition.start )
         {
             taskToAdd.partition.end = taskToAdd.partition.start + taskToAdd.pTask->m_RangeToRun;
+            assert( taskToAdd.partition.end <= taskToAdd.pTask->m_SetSize );
             subTask_.partition.start = taskToAdd.partition.end;
         }
         taskToAdd.pTask->ExecuteRange( taskToAdd.partition, threadNum_ );
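
Note (illustration, not part of the patch): the test above exercises the case from issue #60, where a task set's m_SetSize exceeds the square of the thread count and the pre-fix split logic in SplitAndAddTask could produce a partition extending past m_SetSize. Below is a minimal standalone sketch of the same scenario, assuming the enkiTS public API as used in this patch (TaskScheduler.h header, Initialize(), AddTaskSetToPipe(), WaitforAll()); names such as LargeRangeTask are illustrative only.

    // Minimal repro sketch for the scenario in issue #60: many task sets whose
    // set size exceeds the square of the hardware thread count.
    #include <cassert>
    #include <cstdint>
    #include "TaskScheduler.h" // enkiTS header, as used by the patched sources

    static constexpr uint32_t TASK_RANGE = 65 * 65; // > threads^2 on typical core counts
    static constexpr uint32_t TASK_COUNT = 50;

    static enki::TaskScheduler g_TS;

    struct LargeRangeTask : enki::ITaskSet
    {
        LargeRangeTask() : enki::ITaskSet( TASK_RANGE ) {}

        void ExecuteRange( enki::TaskSetPartition range_, uint32_t /*threadnum_*/ ) override
        {
            // Before the fix, a partition handed to ExecuteRange could end beyond
            // m_SetSize; this assert mirrors the one added in TaskScheduler.cpp.
            (void)range_;
            assert( range_.end <= TASK_RANGE );
        }
    };

    int main()
    {
        g_TS.Initialize();

        LargeRangeTask tasks[TASK_COUNT];
        for( uint32_t i = 0; i < TASK_COUNT; ++i )
        {
            g_TS.AddTaskSetToPipe( &tasks[i] );
        }
        g_TS.WaitforAll();
        return 0;
    }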