/* -*- mode: C++; indent-tabs-mode: nil; -*-
 *
 * This file is a part of LEMON, a generic C++ optimization library.
 *
 * Copyright (C) 2003-2010
 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
 * (Egervary Research Group on Combinatorial Optimization, EGRES).
 *
 * Permission to use, modify and distribute this software is granted
 * provided that this copyright notice appears in all copies. For
  /// finding a \ref min_cost_flow "minimum cost flow".
  ///
  /// \ref CostScaling implements a cost scaling algorithm that performs
  /// push/augment and relabel operations for finding a \ref min_cost_flow
  /// "minimum cost flow" \ref amo93networkflows, \ref goldberg90approximation,
  /// \ref goldberg97efficient, \ref bunnagel98efficient.
  /// It is a highly efficient primal-dual solution method, which
  /// can be viewed as the generalization of the \ref Preflow
  /// "preflow push-relabel" algorithm for the maximum flow problem.
  ///
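  /// For illustration, here is a minimal usage sketch. It assumes the
  /// usual LEMON min-cost-flow parameter-setting interface (\c upperMap(),
  /// \c costMap(), \c supplyMap() and \c run()); see the member
  /// documentation for the exact signatures.
  /// \code
  ///   ListDigraph g;
  ///   ListDigraph::ArcMap<int> capacity(g), cost(g);
  ///   ListDigraph::NodeMap<int> supply(g);
  ///   // ... build the digraph and fill the maps ...
  ///   CostScaling<ListDigraph> cs(g);
  ///   cs.upperMap(capacity).costMap(cost).supplyMap(supply);
  ///   if (cs.run() == CostScaling<ListDigraph>::OPTIMAL) {
  ///     std::cout << "minimum cost: " << cs.totalCost() << std::endl;
  ///   }
  /// \endcode
  ///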
  /// Most of the parameters of the problem (except for the digraph)
      /// admissible arc at a time.
      PUSH,
      /// Augment operations are used, i.e. flow is moved on admissible
      /// paths from a node with excess to a node with deficit.
      AUGMENT,
      /// Partial augment operations are used, i.e. flow is moved on
      /// admissible paths starting from a node with excess, but the
      /// lengths of these paths are limited. This method can be viewed
      /// as a combined version of the previous two operations.
      PARTIAL_AUGMENT
    };
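    // A note on choosing the method (a sketch only, assuming the usual
    // parameter-setting interface shown in the class documentation):
    //
    //   CostScaling<ListDigraph> cs(g);
    //   cs.upperMap(capacity).costMap(cost).supplyMap(supply);
    //   cs.run(CostScaling<ListDigraph>::AUGMENT);  // or PUSH
    //
    // When run() is called without a method, PARTIAL_AUGMENT is used,
    // which is typically the most efficient of the three variants.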
    typedef std::vector<LargeCost> LargeCostVector;
    typedef std::vector<char> BoolVector;
    // Note: vector<char> is used instead of vector<bool> for efficiency reasons

  private:

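    // StaticVectorMap wraps an std::vector and exposes it through the
    // usual LEMON map interface (operator[] and set()), using
    // StaticDigraph::id() of the key as the vector index. It serves as a
    // lightweight node/arc map for the internal StaticDigraph (e.g. the
    // cost and potential maps below).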
    template <typename KT, typename VT>
    class StaticVectorMap {
    public:
      typedef KT Key;
      typedef VT Value;

      StaticVectorMap(std::vector<Value>& v) : _v(v) {}

      const Value& operator[](const Key& key) const {
        return _v[StaticDigraph::id(key)];
      }

      Value& operator[](const Key& key) {
        return _v[StaticDigraph::id(key)];
      }

      void set(const Key& key, const Value& val) {
        _v[StaticDigraph::id(key)] = val;
      }

    private:
    IntVector _buckets;
    IntVector _bucket_next;
    IntVector _bucket_prev;
    IntVector _rank;
    int _max_rank;

    // Data for a StaticDigraph structure
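    // (used by the earlyTermination() heuristic, which builds a static
    // copy of the current residual graph and runs a shortest-path based
    // optimality check on it)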
    typedef std::pair<int, int> IntPair;
    StaticDigraph _sgr;
    std::vector<IntPair> _arc_vec;
    std::vector<LargeCost> _cost_vec;
    LargeCostArcMap _cost_map;
    LargeCostNodeMap _pi_map;

  public:

    /// \brief Constant for infinite upper bounds (capacities).
    ///
    /// Constant for infinite upper bounds (capacities).
    /// It is \c std::numeric_limits<Value>::infinity() if available,
    /// \c std::numeric_limits<Value>::max() otherwise.
      // Check the number types
      LEMON_ASSERT(std::numeric_limits<Value>::is_signed,
        "The flow type of CostScaling must be signed");
      LEMON_ASSERT(std::numeric_limits<Cost>::is_signed,
        "The cost type of CostScaling must be signed");

      // Reset data structures
      reset();
    }

    /// \name Parameters

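      // The sizes below refer to the internal residual network, which
      // (roughly) contains an artificial root node and, for each original
      // arc, a forward and a backward residual arc, plus the arcs
      // connecting the root; hence _res_node_num and _res_arc_num.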
      _lower.resize(_res_arc_num);
      _upper.resize(_res_arc_num);
      _scost.resize(_res_arc_num);
      _supply.resize(_res_node_num);

      _res_cap.resize(_res_arc_num);
      _cost.resize(_res_arc_num);
      _pi.resize(_res_node_num);
      _excess.resize(_res_node_num);
      _next_out.resize(_res_node_num);
      _sum_supply = 0;
      for (int i = 0; i != _root; ++i) {
        _sum_supply += _supply[i];
      }
      if (_sum_supply > 0) return INFEASIBLE;
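      // (A feasible solution requires that the total supply does not
      // exceed the total demand, i.e. the sum of the signed supply
      // values must be at most zero.)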

      // Initialize vectors
      for (int i = 0; i != _res_node_num; ++i) {
        _pi[i] = 0;
        _excess[i] = _supply[i];
      }

      // Remove infinite upper bounds and check negative arcs
      const Value MAX = std::numeric_limits<Value>::max();
      int last_out;
      if (_have_lower) {
        for (int i = 0; i != _root; ++i) {
          _res_cap[ra] = 0;
          _cost[a] = 0;
          _cost[ra] = 0;
        }
      }

      return OPTIMAL;
    }

    // Execute the algorithm and transform the results
    void start(Method method) {
      // Maximum path length for partial augment
      const int MAX_PATH_LENGTH = 4;

      // Initialize data structures for buckets
      _max_rank = _alpha * _res_node_num;
      _buckets.resize(_max_rank);
      _bucket_next.resize(_res_node_num + 1);
      _bucket_prev.resize(_res_node_num + 1);
      _rank.resize(_res_node_num + 1);
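      // The buckets form intrusive doubly-linked lists: _buckets[r] is the
      // first node of rank r, while _bucket_next and _bucket_prev link the
      // nodes within a bucket (an index one past the last node serves as
      // the end-of-list sentinel, see globalUpdate()).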

      // Execute the algorithm
      switch (method) {
        case PUSH:
          startPush();
          break;
        for (int j = 0; j != limit; ++j) {
          if (!_forward[j]) _res_cap[j] += _lower[j];
        }
      }
    }

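    // An arc of the residual network is "admissible" if it has positive
    // residual capacity and negative reduced cost (cost[a] + pi[source]
    // - pi[target]). The phase initialization below saturates every such
    // arc, so that the optimality condition holds on the residual graph
    // afterwards, at the price of creating node excesses and deficits.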
    // Initialize a cost scaling phase
    void initPhase() {
      // Saturate arcs not satisfying the optimality condition
      for (int u = 0; u != _res_node_num; ++u) {
        int last_out = _first_out[u+1];
            _res_cap[a] = 0;
            _res_cap[_reverse[a]] += delta;
          }
        }
      }

      // Find active nodes (i.e. nodes with positive excess)
      for (int u = 0; u != _res_node_num; ++u) {
        if (_excess[u] > 0) _active_nodes.push_back(u);
      }

      // Initialize the next arcs
      for (int u = 0; u != _res_node_num; ++u) {
        _next_out[u] = _first_out[u];
      }
    }

    // Early termination heuristic
    bool earlyTermination() {
      const double EARLY_TERM_FACTOR = 3.0;

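      // The idea: when epsilon is already small, the current solution may
      // in fact be optimal. The check builds a static copy of the current
      // residual graph and performs a shortest-path (Bellman-Ford style)
      // test for a feasible potential; if one exists, the scaling loop
      // can stop early.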
      // Build a static residual graph
    }

    // Global potential update heuristic
    void globalUpdate() {
      int bucket_end = _root + 1;

      // Initialize buckets
      for (int r = 0; r != _max_rank; ++r) {
        _buckets[r] = bucket_end;
      }
      Value total_excess = 0;
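      // The loop below scans the buckets in increasing rank order, i.e. it
      // performs a Dijkstra-like backward sweep over the residual graph
      // using the reduced costs divided by epsilon as arc lengths. The
      // rank assigned to a node estimates how many epsilon units its
      // potential can safely be decreased; the relabel step at the end
      // applies exactly this decrease.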
      for ( ; r != _max_rank; ++r) {
        while (_buckets[r] != bucket_end) {
          // Remove the first node from the current bucket
          int u = _buckets[r];
          _buckets[r] = _bucket_next[u];

          // Search the incoming arcs of u
          LargeCost pi_u = _pi[u];
          int last_out = _first_out[u+1];
          for (int a = _first_out[u]; a != last_out; ++a) {
            int ra = _reverse[a];
              // Compute the new rank of v
              LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
              int new_rank_v = old_rank_v;
              if (nrc < LargeCost(_max_rank))
                new_rank_v = r + 1 + int(nrc);

              // Change the rank of v
              if (new_rank_v < old_rank_v) {
                _rank[v] = new_rank_v;
                _next_out[v] = _first_out[v];

                // Remove v from its old bucket
                if (old_rank_v < _max_rank) {
                  if (_buckets[old_rank_v] == v) {
                    _buckets[old_rank_v] = _bucket_next[v];
                  } else {
                    _bucket_next[_bucket_prev[v]] = _bucket_next[v];
                    _bucket_prev[_bucket_next[v]] = _bucket_prev[v];
                  }
                }

                // Insert v to its new bucket
                _bucket_next[v] = _buckets[new_rank_v];
                _bucket_prev[_buckets[new_rank_v]] = v;
                _buckets[new_rank_v] = v;
              }
            if (total_excess <= 0) break;
          }
        }
        if (total_excess <= 0) break;
      }

      // Relabel nodes
      for (int u = 0; u != _res_node_num; ++u) {
        int k = std::min(_rank[u], r);
        if (k > 0) {
          _pi[u] -= _epsilon * k;
      const double GLOBAL_UPDATE_FACTOR = 3.0;

      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
        (_res_node_num + _sup_node_num * _sup_node_num));
      int next_update_limit = global_update_freq;

      int relabel_cnt = 0;

      // Perform cost scaling phases
      std::vector<int> path;
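      // Scaling schedule: epsilon is divided by _alpha in every phase;
      // once it drops below _alpha it jumps directly to 1, and the loop
      // stops after the epsilon == 1 phase. Since the costs were scaled
      // up beforehand, an epsilon-optimal solution with epsilon < 1 is
      // optimal for the original costs as well.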
      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                             1 : _epsilon / _alpha )
      {
        // Early termination heuristic
        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
          if (earlyTermination()) break;
        }

        // Initialize current phase
        initPhase();

        // Perform partial augment and relabel operations
        while (true) {
          // Select an active node (FIFO selection)
          while (_active_nodes.size() > 0 &&
                 _excess[_active_nodes.front()] <= 0) {
      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
        (_res_node_num + _sup_node_num * _sup_node_num));
      int next_update_limit = global_update_freq;

      int relabel_cnt = 0;

      // Perform cost scaling phases
      BoolVector hyper(_res_node_num, false);
      LargeCostVector hyper_cost(_res_node_num);
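      // hyper[u] marks nodes affected by the push-look-ahead heuristic
      // below: such a node is kept in the active list (see the removal
      // loop further down) until it gets relabeled, even if its excess is
      // not positive; hyper_cost[u] stores the associated reduced cost.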
      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                             1 : _epsilon / _alpha )
      {
        // Early termination heuristic
        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
          if (earlyTermination()) break;
        }

        // Initialize current phase
        initPhase();

        // Perform push and relabel operations
        while (_active_nodes.size() > 0) {
        next_node:
          // Select an active node (FIFO selection)
          n = _active_nodes.front();
          last_out = _first_out[n+1];
          pi_n = _pi[n];

          // Perform push operations if there are admissible arcs
          if (_excess[n] > 0) {
            for (a = _next_out[n]; a != last_out; ++a) {
              if (_res_cap[a] > 0 &&
                  _cost[a] + pi_n - _pi[_target[a]] < 0) {
              // Push-look-ahead heuristic
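              // (Before pushing delta units to the target node t, estimate
              // how much flow t itself could pass on: its deficit plus the
              // residual capacity of its admissible outgoing arcs. If this
              // bound 'ahead' is smaller than delta, only 'ahead' units
              // are pushed and t is marked as a hyper node.)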
              Value ahead = -_excess[t];
              int last_out_t = _first_out[t+1];
              LargeCost pi_t = _pi[t];
              for (int ta = _next_out[t]; ta != last_out_t; ++ta) {
                if (_res_cap[ta] > 0 &&
                    _cost[ta] + pi_t - _pi[_target[ta]] < 0)
                  ahead += _res_cap[ta];
                if (ahead >= delta) break;
              }
              if (ahead < 0) ahead = 0;
            _pi[n] -= min_red_cost + _epsilon;
            _next_out[n] = _first_out[n];
            hyper[n] = false;
            ++relabel_cnt;
          }

          // Remove nodes that are neither active nor hyper
        remove_nodes:
          while ( _active_nodes.size() > 0 &&
                  _excess[_active_nodes.front()] <= 0 &&
                  !hyper[_active_nodes.front()] ) {
            _active_nodes.pop_front();
          }

          // Global update heuristic
          if (relabel_cnt >= next_update_limit) {
            globalUpdate();
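            // The potential update invalidates the look-ahead bookkeeping,
            // so clear all hyper flags.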
            for (int u = 0; u != _res_node_num; ++u)
              hyper[u] = false;