/** @file Impls for DecompNetlistRouter */

#include "DecompNetlistRouter.h"
+#include "globals.h"
#include "netlist_routers.h"
#include "route_net.h"
#include "sink_sampling.h"
@@ -21,25 +22,43 @@ inline RouteIterResults DecompNetlistRouter<HeapType>::route_netlist(int itry, f
    _pres_fac = pres_fac;
    _worst_neg_slack = worst_neg_slack;

+    vtr::Timer t;
+
    /* Organize netlist into a PartitionTree.
     * Nets in a given level of nodes are guaranteed to not have any overlapping bounding boxes, so they can be routed in parallel. */
-    PartitionTree tree(_net_list);
+    if (!_tree) {
+        _tree = PartitionTree(_net_list);
+        PartitionTreeDebug::log("Iteration " + std::to_string(itry) + ": built partition tree in " + std::to_string(t.elapsed_sec()) + " s");
+    }
+
+    /* Remove all virtual nets: we will create them for each iteration */
+    _tree->clear_vnets();

    /* Put the root node on the task queue, which will add its child nodes when it's finished. Wait until the entire tree gets routed. */
    tbb::task_group g;
-    route_partition_tree_node(g, tree.root());
+    route_partition_tree_node(g, _tree->root());
    g.wait();
+    PartitionTreeDebug::log("Routing all nets took " + std::to_string(t.elapsed_sec()) + " s");

    /* Combine results from threads */
    RouteIterResults out;
    for (auto& results : _results_th) {
        out.stats.combine(results.stats);
        out.rerouted_nets.insert(out.rerouted_nets.end(), results.rerouted_nets.begin(), results.rerouted_nets.end());
+        out.bb_updated_nets.insert(out.bb_updated_nets.end(), results.bb_updated_nets.begin(), results.bb_updated_nets.end());
        out.is_routable &= results.is_routable;
    }
+
    return out;
}

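For reference, the members used above are not shown in this diff. The following is a hedged sketch of the assumed declarations (presumably in DecompNetlistRouter.h and netlist_routers.h); the wrapper types are inferred from the calls, not taken from the actual headers:

    /* Sketch only, not the actual headers. The partition tree now persists
     * across routing iterations inside the router (optional wrapper assumed): */
    vtr::optional<PartitionTree> _tree; /* built lazily on the first route_netlist() call */

    /* ...and the per-iteration results carry the nets whose bounding boxes
     * were updated, so the caller can patch up the partition tree: */
    struct RouteIterResults {
        RouterStats stats;                        /* type assumed from out.stats.combine() */
        std::vector<ParentNetId> rerouted_nets;
        std::vector<ParentNetId> bb_updated_nets; /* new in this change */
        bool is_routable = true;                  /* default assumed */
    };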
+/* TODO: Handle this in route_netlist */
+template<typename HeapType>
+void DecompNetlistRouter<HeapType>::handle_bb_updated_nets(const std::vector<ParentNetId>& nets) {
+    VTR_ASSERT(_tree);
+    _tree->update_nets(nets);
+}
+
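A sketch of the intended call pattern (hypothetical caller code; only handle_bb_updated_nets itself is part of this diff). Per the TODO above, this hand-off could eventually be folded into route_netlist:

    /* Hypothetical caller sketch: after an iteration, hand back the nets whose
     * bounding boxes changed so the partition tree can re-bin them. A net grown
     * to the full device bounding box overlaps every cutline, so it can no
     * longer stay in its old tree node. */
    RouteIterResults iter_results = router.route_netlist(itry, pres_fac, worst_neg_slack);
    router.handle_bb_updated_nets(iter_results.bb_updated_nets);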
template<typename HeapType>
void DecompNetlistRouter<HeapType>::set_rcv_enabled(bool x) {
    if (x)
@@ -120,6 +139,9 @@ inline bool should_decompose_vnet(const VirtualNet& vnet, const PartitionTreeNod
template<typename HeapType>
void DecompNetlistRouter<HeapType>::route_partition_tree_node(tbb::task_group& g, PartitionTreeNode& node) {
    auto& route_ctx = g_vpr_ctx.mutable_routing();
+    vtr::Timer t;
+
+    std::vector<ParentNetId> nets(node.nets.begin(), node.nets.end());

    /* Sort so that nets with the most sinks are routed first.
     * We want to interleave virtual nets with regular ones, so sort an "index vector"
@@ -129,15 +151,14 @@ void DecompNetlistRouter<HeapType>::route_partition_tree_node(tbb::task_group& g
    std::vector<size_t> order(node.nets.size() + node.vnets.size());
    std::iota(order.begin(), order.end(), 0);
    std::stable_sort(order.begin(), order.end(), [&](size_t i, size_t j) -> bool {
-        ParentNetId id1 = i < node.nets.size() ? node.nets[i] : node.vnets[i - node.nets.size()].net_id;
-        ParentNetId id2 = j < node.nets.size() ? node.nets[j] : node.vnets[j - node.nets.size()].net_id;
+        ParentNetId id1 = i < node.nets.size() ? nets[i] : node.vnets[i - nets.size()].net_id;
+        ParentNetId id2 = j < node.nets.size() ? nets[j] : node.vnets[j - nets.size()].net_id;
        return _net_list.net_sinks(id1).size() > _net_list.net_sinks(id2).size();
    });

-    vtr::Timer t;
    for (size_t i : order) {
-        if (i < node.nets.size()) { /* Regular net (not decomposed) */
-            ParentNetId net_id = node.nets[i];
+        if (i < nets.size()) { /* Regular net (not decomposed) */
+            ParentNetId net_id = nets[i];
            if (!should_route_net(_net_list, net_id, _connections_inf, _budgeting_inf, _worst_neg_slack, true))
                continue;
            /* Set up the net (reset or prune) only once here in the flow. Then all calls to route_net turn off auto-setup */
@@ -188,6 +209,7 @@ void DecompNetlistRouter<HeapType>::route_partition_tree_node(tbb::task_group& g
            if (flags.retry_with_full_bb) {
                /* ConnectionRouter thinks we should grow the BB. Do that and leave this net unrouted for now */
                route_ctx.route_bb[net_id] = full_device_bb();
+                _results_th.local().bb_updated_nets.push_back(net_id);
                /* Disable decomposition for nets like this: they're already problematic */
                _is_decomp_disabled[net_id] = true;
                continue;
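The _results_th.local() call above writes into a per-thread result accumulator that route_netlist later merges. It is presumably a TBB thread-local container along these lines (an assumption, since the class header is not part of this diff):

    /* Assumed declaration (sketch): one RouteIterResults per worker thread,
     * merged in route_netlist()'s "Combine results from threads" loop. */
    tbb::enumerable_thread_specific<RouteIterResults> _results_th;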
@@ -206,7 +228,7 @@ void DecompNetlistRouter<HeapType>::route_partition_tree_node(tbb::task_group& g
                    continue;
                }
            }
-            /* Route the full vnet. Again we don't care about the flags, they should be handled by the regular path */
+            /* Route the full vnet. We don't care about the flags, they should be handled by the regular path */
            auto sink_mask = get_vnet_sink_mask(vnet);
            route_net(
                _routers_th.local(),
@@ -277,7 +299,7 @@ inline void make_vnet_pair(ParentNetId net_id, const t_bb& bb, Axis cutline_axis

template<typename HeapType>
bool DecompNetlistRouter<HeapType>::decompose_and_route_net(ParentNetId net_id, const PartitionTreeNode& node, VirtualNet& left, VirtualNet& right) {
-    auto& route_ctx = g_vpr_ctx.routing();
+    auto& route_ctx = g_vpr_ctx.mutable_routing();
    auto& net_bb = route_ctx.route_bb[net_id];

    /* Sample enough sinks to provide branch-off points to the virtual nets we create */
@@ -382,7 +404,7 @@ inline std::string describe_vnet(const VirtualNet& vnet) {
template<typename HeapType>
bool DecompNetlistRouter<HeapType>::decompose_and_route_vnet(VirtualNet& vnet, const PartitionTreeNode& node, VirtualNet& left, VirtualNet& right) {
    /* Sample enough sinks to provide branch-off points to the virtual nets we create */
-    auto sink_mask = get_vnet_decomposition_mask(vnet, node);
+    auto sink_mask = get_decomposition_mask_vnet(vnet, node);

    /* Route the *parent* net with the given mask: only the sinks we ask for will be routed */
    auto flags = route_net(
@@ -499,6 +521,7 @@ inline bool get_reduction_mask(ParentNetId net_id, Axis cutline_axis, int cutlin
template<typename HeapType>
vtr::dynamic_bitset<> DecompNetlistRouter<HeapType>::get_decomposition_mask(ParentNetId net_id, const PartitionTreeNode& node) {
    const auto& route_ctx = g_vpr_ctx.routing();
+
    const RouteTree& tree = route_ctx.route_trees[net_id].value();
    size_t num_sinks = tree.num_sinks();

@@ -512,6 +535,7 @@ vtr::dynamic_bitset<> DecompNetlistRouter<HeapType>::get_decomposition_mask(Pare
    bool is_reduced = get_reduction_mask(net_id, node.cutline_axis, node.cutline_pos, out);

    bool source_on_cutline = is_close_to_cutline(tree.root().inode, node.cutline_axis, node.cutline_pos, 1);
+
    if (!is_reduced || source_on_cutline)
        convex_hull_downsample(net_id, route_ctx.route_bb[net_id], out);

@@ -638,7 +662,7 @@ inline bool get_reduction_mask_vnet_with_source(const VirtualNet& vnet, Axis cut
}

template<typename HeapType>
-vtr::dynamic_bitset<> DecompNetlistRouter<HeapType>::get_vnet_decomposition_mask(const VirtualNet& vnet, const PartitionTreeNode& node) {
+vtr::dynamic_bitset<> DecompNetlistRouter<HeapType>::get_decomposition_mask_vnet(const VirtualNet& vnet, const PartitionTreeNode& node) {
    const auto& route_ctx = g_vpr_ctx.routing();
    const RouteTree& tree = route_ctx.route_trees[vnet.net_id].value();
    int num_sinks = tree.num_sinks();
@@ -650,10 +674,11 @@ vtr::dynamic_bitset<> DecompNetlistRouter<HeapType>::get_vnet_decomposition_mask
     * sinks in the small side and unblock. Add convex hull since we are in a vnet which
     * may not have a source at all */
    if (inside_bb(tree.root().inode, vnet.clipped_bb)) { /* We have a source, no need to sample after reduction in most cases */
-        bool is_reduced = get_reduction_mask_vnet_with_source(vnet, node.cutline_axis, node.cutline_pos, out);
+        bool is_reduced = get_reduction_mask_vnet_with_source(vnet, node.cutline_axis, node.cutline_pos, out);
        bool source_on_cutline = is_close_to_cutline(tree.root().inode, node.cutline_axis, node.cutline_pos, 1);
-        if (!is_reduced || source_on_cutline)
+        if (!is_reduced || source_on_cutline) {
            convex_hull_downsample(vnet.net_id, vnet.clipped_bb, out);
+        }
    } else {
        int reduced_sides = get_reduction_mask_vnet_no_source(vnet, node.cutline_axis, node.cutline_pos, out);
        if (reduced_sides < 2) {
@@ -666,9 +691,11 @@ vtr::dynamic_bitset<> DecompNetlistRouter<HeapType>::get_vnet_decomposition_mask
    /* Sample if a sink is too close to the cutline (and unreached).
     * Those sinks are likely to fail routing */
    for (size_t isink : isinks) {
+        RRNodeId rr_sink = route_ctx.net_rr_terminals[vnet.net_id][isink];
+        if (!inside_bb(rr_sink, vnet.clipped_bb))
+            continue;
        if (is_isink_reached.get(isink))
            continue;
-        RRNodeId rr_sink = route_ctx.net_rr_terminals[vnet.net_id][isink];
        if (is_close_to_cutline(rr_sink, node.cutline_axis, node.cutline_pos, 1)) {
            out.set(isink, true);
            continue;
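This loop leans on the is_close_to_cutline() helper defined earlier in the file. As a reading aid, here is a hedged sketch of what it presumably checks (the real implementation is not shown in this diff and may differ):

    /* Sketch (assumption): a node counts as "close" to the cutline when its
     * span along the cut axis comes within thresh tiles of the cutline. */
    inline bool sketch_is_close_to_cutline(RRNodeId inode, Axis cutline_axis, int cutline_pos, int thresh) {
        const auto& rr_graph = g_vpr_ctx.device().rr_graph;
        int lo = cutline_axis == Axis::X ? rr_graph.node_xlow(inode) : rr_graph.node_ylow(inode);
        int hi = cutline_axis == Axis::X ? rr_graph.node_xhigh(inode) : rr_graph.node_yhigh(inode);
        return lo - thresh <= cutline_pos && cutline_pos <= hi + thresh;
    }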