@@ -199,3 +199,2 @@
     typedef std::vector<int> IntVector;
-    typedef std::vector<char> BoolVector;
     typedef std::vector<Value> ValueVector;
@@ -203,2 +202,4 @@
     typedef std::vector<LargeCost> LargeCostVector;
+    typedef std::vector<char> BoolVector;
+    // Note: vector<char> is used instead of vector<bool> for efficiency reasons
 
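Note on the hunk above: std::vector<char> is chosen for BoolVector because std::vector<bool> is a packed specialization whose operator[] returns a proxy object, which makes the per-element flag accesses in the hot loops slower. A minimal standalone sketch of the difference (illustrative only, not part of the patch):

    #include <vector>

    // Byte-per-flag storage, as the patch's BoolVector typedef uses:
    // plain char loads and stores, and the data can be addressed directly.
    typedef std::vector<char> BoolVector;

    void flagDemo() {
      BoolVector flags(8, false);
      flags[3] = true;                  // direct byte store
      bool f = flags[3] != 0;           // direct byte load
      (void)f;

      // std::vector<bool> packs bits; operator[] returns a proxy
      // (std::vector<bool>::reference), so every access masks bits.
      std::vector<bool> packed(8, false);
      packed[3] = true;                 // goes through the proxy
      bool g = packed[3];               // bit extraction
      (void)g;
    }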
@@ -246,2 +247,3 @@
     Value _sum_supply;
+    int _sup_node_num;
 
@@ -274,2 +276,8 @@
 
+    IntVector _buckets;
+    IntVector _bucket_next;
+    IntVector _bucket_prev;
+    IntVector _rank;
+    int _max_rank;
+
     // Data for a StaticDigraph structure
@@ -804,2 +812,7 @@
 
+      _sup_node_num = 0;
+      for (NodeIt n(_graph); n != INVALID; ++n) {
+        if (sup[n] > 0) ++_sup_node_num;
+      }
+
       // Find a feasible flow using Circulation
@@ -838,3 +851,3 @@
         int ra = _reverse[a];
-        _res_cap[a] = 1;
+        _res_cap[a] = 0;
         _res_cap[ra] = 0;
@@ -853,2 +866,9 @@
 
+      // Initialize data structures for buckets
+      _max_rank = _alpha * _res_node_num;
+      _buckets.resize(_max_rank);
+      _bucket_next.resize(_res_node_num + 1);
+      _bucket_prev.resize(_res_node_num + 1);
+      _rank.resize(_res_node_num + 1);
+
       // Execute the algorithm
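Note on the hunks above: the _buckets, _bucket_next, _bucket_prev and _rank vectors form the intrusive bucket lists used later by globalUpdate(); bucket r collects the nodes whose current rank (roughly, the estimated number of _epsilon-sized relabel steps to reach a node with deficit) is r, with _root + 1 used later as the end marker, which is presumably why the per-node vectors are sized _res_node_num + 1. A minimal standalone sketch of the same layout (the wrapper struct and its names are illustrative, not LEMON code):

    #include <vector>

    // Intrusive bucket lists over node indices 0..node_num-1, with node_num
    // acting as the end sentinel, mirroring the patch's plain IntVectors.
    struct Buckets {
      std::vector<int> first;   // first[r]: head of bucket r, or the sentinel
      std::vector<int> next;    // next[u]:  successor of u in its bucket
      std::vector<int> prev;    // prev[u]:  predecessor of u in its bucket
      int sentinel;

      Buckets(int node_num, int max_rank)
        : first(max_rank, node_num), next(node_num + 1),
          prev(node_num + 1), sentinel(node_num) {}

      // Insert u at the front of bucket r.
      void push(int r, int u) {
        next[u] = first[r];
        prev[first[r]] = u;
        first[r] = u;
      }

      // Remove and return the head of bucket r (caller checks it is not empty).
      int pop(int r) {
        int u = first[r];
        first[r] = next[u];
        return u;
      }

      // Unlink an arbitrary node u from bucket r.
      void remove(int r, int u) {
        if (first[r] == u) {
          first[r] = next[u];
        } else {
          next[prev[u]] = next[u];
          prev[next[u]] = prev[u];
        }
      }
    };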
@@ -892,43 +912,14 @@
 
-    /// Execute the algorithm performing augment and relabel operations
-    void startAugment(int max_length = std::numeric_limits<int>::max()) {
-      // Paramters for heuristics
-      const int BF_HEURISTIC_EPSILON_BOUND = 1000;
-      const int BF_HEURISTIC_BOUND_FACTOR = 3;
-
-      // Perform cost scaling phases
-      IntVector pred_arc(_res_node_num);
-      std::vector<int> path_nodes;
-      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
-                             1 : _epsilon / _alpha )
-      {
-        // "Early Termination" heuristic: use Bellman-Ford algorithm
-        // to check if the current flow is optimal
-        if (_epsilon <= BF_HEURISTIC_EPSILON_BOUND) {
-          _arc_vec.clear();
-          _cost_vec.clear();
-          for (int j = 0; j != _res_arc_num; ++j) {
-            if (_res_cap[j] > 0) {
-              _arc_vec.push_back(IntPair(_source[j], _target[j]));
-              _cost_vec.push_back(_cost[j] + 1);
-            }
-          }
-          _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
-          BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
-          bf.init(0);
-          bool done = false;
-          int K = int(BF_HEURISTIC_BOUND_FACTOR * sqrt(_res_node_num));
-          for (int i = 0; i < K && !done; ++i)
-            done = bf.processNextWeakRound();
-          if (done) break;
-        }
-
+    // Initialize a cost scaling phase
+    void initPhase() {
       // Saturate arcs not satisfying the optimality condition
-      for (int a = 0; a != _res_arc_num; ++a) {
-        if (_res_cap[a] > 0 &&
-            _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
+      for (int u = 0; u != _res_node_num; ++u) {
+        int last_out = _first_out[u+1];
+        LargeCost pi_u = _pi[u];
+        for (int a = _first_out[u]; a != last_out; ++a) {
+          int v = _target[a];
+          if (_res_cap[a] > 0 && _cost[a] + pi_u - _pi[v] < 0) {
            Value delta = _res_cap[a];
-            _excess[_source[a]] -= delta;
-            _excess[_target[a]] += delta;
+            _excess[u] -= delta;
+            _excess[v] += delta;
            _res_cap[a] = 0;
@@ -937,2 +928,3 @@
         }
+      }
 
@@ -947,2 +939,142 @@
       }
+    }
+
+    // Early termination heuristic
+    bool earlyTermination() {
+      const double EARLY_TERM_FACTOR = 3.0;
+
+      // Build a static residual graph
+      _arc_vec.clear();
+      _cost_vec.clear();
+      for (int j = 0; j != _res_arc_num; ++j) {
+        if (_res_cap[j] > 0) {
+          _arc_vec.push_back(IntPair(_source[j], _target[j]));
+          _cost_vec.push_back(_cost[j] + 1);
+        }
+      }
+      _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
+
+      // Run Bellman-Ford algorithm to check if the current flow is optimal
+      BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
+      bf.init(0);
+      bool done = false;
+      int K = int(EARLY_TERM_FACTOR * std::sqrt(double(_res_node_num)));
+      for (int i = 0; i < K && !done; ++i) {
+        done = bf.processNextWeakRound();
+      }
+      return done;
+    }
+
+    // Global potential update heuristic
+    void globalUpdate() {
+      int bucket_end = _root + 1;
+
+      // Initialize buckets
+      for (int r = 0; r != _max_rank; ++r) {
+        _buckets[r] = bucket_end;
+      }
+      Value total_excess = 0;
+      for (int i = 0; i != _res_node_num; ++i) {
+        if (_excess[i] < 0) {
+          _rank[i] = 0;
+          _bucket_next[i] = _buckets[0];
+          _bucket_prev[_buckets[0]] = i;
+          _buckets[0] = i;
+        } else {
+          total_excess += _excess[i];
+          _rank[i] = _max_rank;
+        }
+      }
+      if (total_excess == 0) return;
+
+      // Search the buckets
+      int r = 0;
+      for ( ; r != _max_rank; ++r) {
+        while (_buckets[r] != bucket_end) {
+          // Remove the first node from the current bucket
+          int u = _buckets[r];
+          _buckets[r] = _bucket_next[u];
+
+          // Search the incomming arcs of u
+          LargeCost pi_u = _pi[u];
+          int last_out = _first_out[u+1];
+          for (int a = _first_out[u]; a != last_out; ++a) {
+            int ra = _reverse[a];
+            if (_res_cap[ra] > 0) {
+              int v = _source[ra];
+              int old_rank_v = _rank[v];
+              if (r < old_rank_v) {
+                // Compute the new rank of v
+                LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
+                int new_rank_v = old_rank_v;
+                if (nrc < LargeCost(_max_rank))
+                  new_rank_v = r + 1 + int(nrc);
+
+                // Change the rank of v
+                if (new_rank_v < old_rank_v) {
+                  _rank[v] = new_rank_v;
+                  _next_out[v] = _first_out[v];
+
+                  // Remove v from its old bucket
+                  if (old_rank_v < _max_rank) {
+                    if (_buckets[old_rank_v] == v) {
+                      _buckets[old_rank_v] = _bucket_next[v];
+                    } else {
+                      _bucket_next[_bucket_prev[v]] = _bucket_next[v];
+                      _bucket_prev[_bucket_next[v]] = _bucket_prev[v];
+                    }
+                  }
+
+                  // Insert v to its new bucket
+                  _bucket_next[v] = _buckets[new_rank_v];
+                  _bucket_prev[_buckets[new_rank_v]] = v;
+                  _buckets[new_rank_v] = v;
+                }
+              }
+            }
+          }
+
+          // Finish search if there are no more active nodes
+          if (_excess[u] > 0) {
+            total_excess -= _excess[u];
+            if (total_excess <= 0) break;
+          }
+        }
+        if (total_excess <= 0) break;
+      }
+
+      // Relabel nodes
+      for (int u = 0; u != _res_node_num; ++u) {
+        int k = std::min(_rank[u], r);
+        if (k > 0) {
+          _pi[u] -= _epsilon * k;
+          _next_out[u] = _first_out[u];
+        }
+      }
+    }
+
+    /// Execute the algorithm performing augment and relabel operations
+    void startAugment(int max_length = std::numeric_limits<int>::max()) {
+      // Paramters for heuristics
+      const int EARLY_TERM_EPSILON_LIMIT = 1000;
+      const double GLOBAL_UPDATE_FACTOR = 3.0;
+
+      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+        (_res_node_num + _sup_node_num * _sup_node_num));
+      int next_update_limit = global_update_freq;
+
+      int relabel_cnt = 0;
+
+      // Perform cost scaling phases
+      std::vector<int> path;
+      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
+                             1 : _epsilon / _alpha )
+      {
+        // Early termination heuristic
+        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
+          if (earlyTermination()) break;
+        }
+
+        // Initialize current phase
+        initPhase();
 
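Note on the hunk above: earlyTermination() uses the fact that the current flow is already optimal when the residual graph, with every arc cost increased by one, contains no negative cycle; in that case potentials exist under which every residual arc has reduced cost at least -1, and since _cost holds the internally up-scaled costs, that already certifies optimality. Bellman-Ford is started from every node at once (bf.init(0)) and only about K = 3 * sqrt(n) weak rounds are spent on the test; non-convergence is treated as "undecided", so the scaling phases simply continue. A rough standalone sketch of the same test on an explicit arc list (the ResArc struct and the function are illustrative; the patch uses LEMON's StaticDigraph and BellmanFord instead):

    #include <cstddef>
    #include <vector>

    struct ResArc {
      int source, target;
      long long cost;   // residual arc cost, already increased by one
    };

    // Returns true if Bellman-Ford converges within max_rounds, which
    // certifies that no negative cycle exists; false means "undecided".
    bool earlyTerminationCheck(int node_num, const std::vector<ResArc>& arcs,
                               int max_rounds) {
      std::vector<long long> dist(node_num, 0);  // start from every node
      for (int round = 0; round < max_rounds; ++round) {
        bool changed = false;
        for (std::size_t j = 0; j < arcs.size(); ++j) {
          long long d = dist[arcs[j].source] + arcs[j].cost;
          if (d < dist[arcs[j].target]) {
            dist[arcs[j].target] = d;
            changed = true;
          }
        }
        if (!changed) return true;   // converged: no negative cycle
      }
      return false;                  // not decided within max_rounds
    }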
@@ -957,21 +1089,16 @@
           int start = _active_nodes.front();
-          path_nodes.clear();
-          path_nodes.push_back(start);
 
           // Find an augmenting path from the start node
+          path.clear();
           int tip = start;
-          while (_excess[tip] >= 0 &&
-                 int(path_nodes.size()) <= max_length) {
+          while (_excess[tip] >= 0 && int(path.size()) < max_length) {
             int u;
-            LargeCost min_red_cost, rc;
-            int last_out = _sum_supply < 0 ?
-              _first_out[tip+1] : _first_out[tip+1] - 1;
+            LargeCost min_red_cost, rc, pi_tip = _pi[tip];
+            int last_out = _first_out[tip+1];
             for (int a = _next_out[tip]; a != last_out; ++a) {
-              if (_res_cap[a] > 0 &&
-                  _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
               u = _target[a];
-              pred_arc[u] = a;
+              if (_res_cap[a] > 0 && _cost[a] + pi_tip - _pi[u] < 0) {
+                path.push_back(a);
                 _next_out[tip] = a;
                 tip = u;
-                path_nodes.push_back(tip);
                 goto next_step;
@@ -981,5 +1108,9 @@
             // Relabel tip node
-            min_red_cost = std::numeric_limits<LargeCost>::max() / 2;
+            min_red_cost = std::numeric_limits<LargeCost>::max();
+            if (tip != start) {
+              int ra = _reverse[path.back()];
+              min_red_cost = _cost[ra] + pi_tip - _pi[_target[ra]];
+            }
             for (int a = _first_out[tip]; a != last_out; ++a) {
-              rc = _cost[a] + _pi[_source[a]] - _pi[_target[a]];
+              rc = _cost[a] + pi_tip - _pi[_target[a]];
               if (_res_cap[a] > 0 && rc < min_red_cost) {
@@ -989,5 +1120,4 @@
             _pi[tip] -= min_red_cost + _epsilon;
-
-            // Reset the next arc of tip
             _next_out[tip] = _first_out[tip];
+            ++relabel_cnt;
 
@@ -995,4 +1125,4 @@
             if (tip != start) {
-              path_nodes.pop_back();
-              tip = path_nodes.back();
+              tip = _source[path.back()];
+              path.pop_back();
             }
@@ -1004,7 +1134,7 @@
             Value delta;
-            int u, v = path_nodes.front(), pa;
-            for (int i = 1; i < int(path_nodes.size()); ++i) {
+            int pa, u, v = start;
+            for (int i = 0; i != int(path.size()); ++i) {
+              pa = path[i];
               u = v;
-              v = path_nodes[i];
-              pa = pred_arc[v];
+              v = _target[pa];
               delta = std::min(_res_cap[pa], _excess[u]);
@@ -1017,2 +1147,8 @@
           }
+
+          // Global update heuristic
+          if (relabel_cnt >= next_update_limit) {
+            globalUpdate();
+            next_update_limit += global_update_freq;
+          }
         }
@@ -1024,4 +1160,10 @@
       // Paramters for heuristics
-      const int BF_HEURISTIC_EPSILON_BOUND = 1000;
-      const int BF_HEURISTIC_BOUND_FACTOR = 3;
+      const int EARLY_TERM_EPSILON_LIMIT = 1000;
+      const double GLOBAL_UPDATE_FACTOR = 2.0;
+
+      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+        (_res_node_num + _sup_node_num * _sup_node_num));
+      int next_update_limit = global_update_freq;
+
+      int relabel_cnt = 0;
 
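Note on the hunk above: the new global_update_freq constant spaces out the relatively expensive globalUpdate() calls so that their cost stays amortized; with GLOBAL_UPDATE_FACTOR = 2.0 here in the push-relabel variant (the augmenting variant above uses 3.0), a full potential update runs only after roughly 2 * (n + s^2) relabel operations, where n is the number of residual nodes and s the number of supply nodes. A small worked example with made-up sizes:

    #include <cstdio>

    int main() {
      // Illustrative sizes only: 1000 residual nodes, 50 with positive supply.
      const double GLOBAL_UPDATE_FACTOR = 2.0;   // value used in this variant
      int res_node_num = 1000;
      int sup_node_num = 50;
      int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
        (res_node_num + sup_node_num * sup_node_num));
      // 2.0 * (1000 + 2500) = 7000 relabels between global updates
      std::printf("global_update_freq = %d\n", global_update_freq);
      return 0;
    }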
@@ -1029,2 +1171,3 @@
       BoolVector hyper(_res_node_num, false);
+      LargeCostVector hyper_cost(_res_node_num);
       for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
@@ -1032,45 +1175,9 @@
       {
-        // "Early Termination" heuristic: use Bellman-Ford algorithm
-        // to check if the current flow is optimal
-        if (_epsilon <= BF_HEURISTIC_EPSILON_BOUND) {
-          _arc_vec.clear();
-          _cost_vec.clear();
-          for (int j = 0; j != _res_arc_num; ++j) {
-            if (_res_cap[j] > 0) {
-              _arc_vec.push_back(IntPair(_source[j], _target[j]));
-              _cost_vec.push_back(_cost[j] + 1);
-            }
-          }
-          _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
-          BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
-          bf.init(0);
-          bool done = false;
-          int K = int(BF_HEURISTIC_BOUND_FACTOR * sqrt(_res_node_num));
-          for (int i = 0; i < K && !done; ++i)
-            done = bf.processNextWeakRound();
-          if (done) break;
+        // Early termination heuristic
+        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
+          if (earlyTermination()) break;
         }
 
-        // Saturate arcs not satisfying the optimality condition
-        for (int a = 0; a != _res_arc_num; ++a) {
-          if (_res_cap[a] > 0 &&
-              _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
-            Value delta = _res_cap[a];
-            _excess[_source[a]] -= delta;
-            _excess[_target[a]] += delta;
-            _res_cap[a] = 0;
-            _res_cap[_reverse[a]] += delta;
-          }
-        }
-
-        // Find active nodes (i.e. nodes with positive excess)
-        for (int u = 0; u != _res_node_num; ++u) {
-          if (_excess[u] > 0) _active_nodes.push_back(u);
-        }
-
-        // Initialize the next arcs
-        for (int u = 0; u != _res_node_num; ++u) {
-          _next_out[u] = _first_out[u];
-        }
+        // Initialize current phase
+        initPhase();
 
@@ -1078,3 +1185,3 @@
         while (_active_nodes.size() > 0) {
-          LargeCost min_red_cost, rc;
+          LargeCost min_red_cost, rc, pi_n;
           Value delta;
@@ -1082,7 +1189,7 @@
 
+        next_node:
           // Select an active node (FIFO selection)
-        next_node:
           n = _active_nodes.front();
-          last_out = _sum_supply < 0 ?
-            _first_out[n+1] : _first_out[n+1] - 1;
+          last_out = _first_out[n+1];
+          pi_n = _pi[n];
 
@@ -1092,3 +1199,3 @@
             if (_res_cap[a] > 0 &&
-                _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
+                _cost[a] + pi_n - _pi[_target[a]] < 0) {
              delta = std::min(_res_cap[a], _excess[n]);
@@ -1098,7 +1205,7 @@
               Value ahead = -_excess[t];
-              int last_out_t = _sum_supply < 0 ?
-                _first_out[t+1] : _first_out[t+1] - 1;
+              int last_out_t = _first_out[t+1];
+              LargeCost pi_t = _pi[t];
               for (int ta = _next_out[t]; ta != last_out_t; ++ta) {
                 if (_res_cap[ta] > 0 &&
-                    _cost[ta] + _pi[_source[ta]] - _pi[_target[ta]] < 0)
+                    _cost[ta] + pi_t - _pi[_target[ta]] < 0)
                   ahead += _res_cap[ta];
@@ -1109,3 +1216,3 @@
               // Push flow along the arc
-              if (ahead < delta) {
+              if (ahead < delta && !hyper[t]) {
                 _res_cap[a] -= ahead;
@@ -1116,2 +1223,3 @@
                 hyper[t] = true;
+                hyper_cost[t] = _cost[a] + pi_n - pi_t;
                 _next_out[n] = a;
@@ -1138,5 +1246,6 @@
           if (_excess[n] > 0 || hyper[n]) {
-            min_red_cost = std::numeric_limits<LargeCost>::max() / 2;
+            min_red_cost = hyper[n] ? -hyper_cost[n] :
+              std::numeric_limits<LargeCost>::max();
             for (int a = _first_out[n]; a != last_out; ++a) {
-              rc = _cost[a] + _pi[_source[a]] - _pi[_target[a]];
+              rc = _cost[a] + pi_n - _pi[_target[a]];
               if (_res_cap[a] > 0 && rc < min_red_cost) {
@@ -1146,6 +1255,5 @@
             _pi[n] -= min_red_cost + _epsilon;
+            _next_out[n] = _first_out[n];
             hyper[n] = false;
-
-            // Reset the next arc
-            _next_out[n] = _first_out[n];
+            ++relabel_cnt;
           }
@@ -1159,2 +1267,10 @@
           }
+
+          // Global update heuristic
+          if (relabel_cnt >= next_update_limit) {
+            globalUpdate();
+            for (int u = 0; u != _res_node_num; ++u)
+              hyper[u] = false;
+            next_update_limit += global_update_freq;
+          }
         }