@@ -311,10 +311,16 @@ std::shared_future<void> BasicTaskflow<E>::run_n(Framework& f, size_t repeat, C&
311311
312312 auto &tpg = _topologies.emplace_front (f, repeat);
313313 f._topologies .push_back (&tpg);
314-
314+
315+ // PV 1/31 (twhuang): Lambda in C++17 is by default inline - no need for static
315316 static const auto setup_topology = [](auto & f, auto & tpg) {
317+
316318 for (auto & n: f._graph ) {
317- // // TODO: swap this with the last and then pop_back
319+
320+ // PR 1/31 (twhuang): I don't think we need check the emptiness
321+ // of the successors over here
322+ // Also, when do you clean up dynamic tasking nodes?
323+ //
318324 if (!n._successors .empty ()) {
319325 for (size_t i=0 ; i<n._successors .size (); i++) {
320326 if (n._successors [i] == f._last_target ) {
@@ -336,18 +342,23 @@ std::shared_future<void> BasicTaskflow<E>::run_n(Framework& f, size_t repeat, C&
336342 }
337343 };
338344
345+  // PV 1/31 (twhuang): single worker - do we need to remove topologies?
346+
339347 // Iterative execution to avoid stack overflow
340348 if (num_workers () == 0 ) {
341349 // Clear last execution data & Build precedence between nodes and target
342350 setup_topology (f, tpg);
343351
344352 tpg._target ._work = std::forward<C>(c);
353+
354+  // PR 1/31 (twhuang): redundant tgt_predecessor
345355 const int tgt_predecessor = tpg._target ._predecessors .size ();
346356
347357 for (size_t i=0 ; i<repeat; i++) {
348358
349359 _schedule (tpg._sources );
350-
360+
361+ // PR 1/31 (twhuang): why do we need to set the dependents again?
351362 // Reset target
352363 f._topologies .front ()->_target ._predecessors .resize (tgt_predecessor);
353364 f._topologies .front ()->_target ._dependents = tgt_predecessor;
@@ -370,7 +381,8 @@ std::shared_future<void> BasicTaskflow<E>::run_n(Framework& f, size_t repeat, C&
370381 tgt_predecessor = tpg._target ._predecessors .size (), this ]() mutable {
371382
372383 std::invoke (c);
373-
384+
385+ // PV 1/31 (twhuang): thread safety?
374386 // case 1: we still need to run the topology again
375387 if (--f._topologies .front ()->_repeat != 0 ) {
376388
@@ -450,7 +462,9 @@ void BasicTaskflow<E>::Closure::operator () () const {
450462 }
451463 // subflow node type
452464 else {
453-
465+
466+ // PV 1/31 (twhuang): emplace is enough
467+ //
454468 // Clear the subgraph before the task execution
455469 if (!node->_spawned ) {
456470 node->_subgraph .reset ();
@@ -485,8 +499,12 @@ void BasicTaskflow<E>::Closure::operator () () const {
485499 }
486500 }
487501 }
488- } // End of DynamicWork -------------------------------------------------------------------------
489-
502+ } // End of DynamicWork -----------------------------------------------------
503+
504+ // PV 1/31 (twhuang): probably can add a bitwise state for each node?
505+ // Also I think the while loop can be improved.
506+ // Why do we need num_successors in if?
507+ //
490508 // Recover the runtime change due to dynamic tasking except the target & spawn tasks
491509 // This must be done before scheduling the successors, otherwise this might cause
492510 // race condition on the _dependents
@@ -498,7 +516,7 @@ void BasicTaskflow<E>::Closure::operator () () const {
498516 node->_spawned = false ;
499517 }
500518
501- // At this point, the node/node storage might be destructed.
519+ // At this point, the node storage might be destructed.
502520 for (size_t i=0 ; i<num_successors; ++i) {
503521 if (--(node->_successors [i]->_dependents ) == 0 ) {
504522 taskflow->_schedule (*(node->_successors [i]));
0 commit comments