@@ -959,72 +959,271 @@
           Value delta = _res_cap[a];
           if (delta > 0) {
             int v = _target[a];
             if (_cost[a] + pi_u - _pi[v] < 0) {
               _excess[u] -= delta;
               _excess[v] += delta;
               _res_cap[a] = 0;
               _res_cap[_reverse[a]] += delta;
             }
           }
         }
       }
 
       // Find active nodes (i.e. nodes with positive excess)
       for (int u = 0; u != _res_node_num; ++u) {
         if (_excess[u] > 0) _active_nodes.push_back(u);
       }
 
       // Initialize the next arcs
       for (int u = 0; u != _res_node_num; ++u) {
         _next_out[u] = _first_out[u];
       }
     }
 
-    // Early termination heuristic
-    bool earlyTermination() {
-      const double EARLY_TERM_FACTOR = 3.0;
+    // Price (potential) refinement heuristic
+    bool priceRefinement() {
 
-      // Build a static residual graph
-      _arc_vec.clear();
-      _cost_vec.clear();
-      for (int j = 0; j != _res_arc_num; ++j) {
-        if (_res_cap[j] > 0) {
-          _arc_vec.push_back(IntPair(_source[j], _target[j]));
-          _cost_vec.push_back(_cost[j] + 1);
+      // Stack for storing the topological order
+      IntVector stack(_res_node_num);
+      int stack_top;
+
+      // Perform phases
+      while (topologicalSort(stack, stack_top)) {
+
+        // Compute node ranks in the acyclic admissible network and
+        // store the nodes in buckets
+        for (int i = 0; i != _res_node_num; ++i) {
+          _rank[i] = 0;
         }
+        const int bucket_end = _root + 1;
+        for (int r = 0; r != _max_rank; ++r) {
+          _buckets[r] = bucket_end;
+        }
+        int top_rank = 0;
+        for ( ; stack_top >= 0; --stack_top) {
+          int u = stack[stack_top], v;
+          int rank_u = _rank[u];
+
+          LargeCost rc, pi_u = _pi[u];
+          int last_out = _first_out[u+1];
+          for (int a = _first_out[u]; a != last_out; ++a) {
+            if (_res_cap[a] > 0) {
+              v = _target[a];
+              rc = _cost[a] + pi_u - _pi[v];
+              if (rc < 0) {
+                LargeCost nrc = static_cast<LargeCost>((-rc - 0.5) / _epsilon);
+                if (nrc < LargeCost(_max_rank)) {
+                  int new_rank_v = rank_u + static_cast<int>(nrc);
+                  if (new_rank_v > _rank[v]) {
+                    _rank[v] = new_rank_v;
+                  }
+                }
+              }
+            }
+          }
+
+          if (rank_u > 0) {
+            top_rank = std::max(top_rank, rank_u);
+            int bfirst = _buckets[rank_u];
+            _bucket_next[u] = bfirst;
+            _bucket_prev[bfirst] = u;
+            _buckets[rank_u] = u;
+          }
+        }
+
+        // Check if the current flow is epsilon-optimal
+        if (top_rank == 0) {
+          return true;
+        }
+
+        // Process buckets in top-down order
+        for (int rank = top_rank; rank > 0; --rank) {
+          while (_buckets[rank] != bucket_end) {
+            // Remove the first node from the current bucket
+            int u = _buckets[rank];
+            _buckets[rank] = _bucket_next[u];
+
+            // Search the outgoing arcs of u
+            LargeCost rc, pi_u = _pi[u];
+            int last_out = _first_out[u+1];
+            int v, old_rank_v, new_rank_v;
+            for (int a = _first_out[u]; a != last_out; ++a) {
+              if (_res_cap[a] > 0) {
+                v = _target[a];
+                old_rank_v = _rank[v];
+
+                if (old_rank_v < rank) {
+
+                  // Compute the new rank of node v
+                  rc = _cost[a] + pi_u - _pi[v];
+                  if (rc < 0) {
+                    new_rank_v = rank;
+                  } else {
+                    LargeCost nrc = rc / _epsilon;
+                    new_rank_v = 0;
+                    if (nrc < LargeCost(_max_rank)) {
+                      new_rank_v = rank - 1 - static_cast<int>(nrc);
+                    }
+                  }
+
+                  // Change the rank of node v
+                  if (new_rank_v > old_rank_v) {
+                    _rank[v] = new_rank_v;
+
+                    // Remove v from its old bucket
+                    if (old_rank_v > 0) {
+                      if (_buckets[old_rank_v] == v) {
+                        _buckets[old_rank_v] = _bucket_next[v];
+                      } else {
+                        int pv = _bucket_prev[v], nv = _bucket_next[v];
+                        _bucket_next[pv] = nv;
+                        _bucket_prev[nv] = pv;
+                      }
+                    }
+
+                    // Insert v into its new bucket
+                    int nv = _buckets[new_rank_v];
+                    _bucket_next[v] = nv;
+                    _bucket_prev[nv] = v;
+                    _buckets[new_rank_v] = v;
+                  }
+                }
+              }
+            }
+
+            // Refine potential of node u
+            _pi[u] -= rank * _epsilon;
+          }
+        }
+
       }
-      _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
 
-      // Run Bellman-Ford algorithm to check if the current flow is optimal
-      BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
-      bf.init(0);
-      bool done = false;
-      int K = int(EARLY_TERM_FACTOR * std::sqrt(double(_res_node_num)));
-      for (int i = 0; i < K && !done; ++i) {
-        done = bf.processNextWeakRound();
+      return false;
+    }
+
+    // Find and cancel cycles in the admissible network and
+    // determine topological order using DFS
+    bool topologicalSort(IntVector &stack, int &stack_top) {
+      const int MAX_CYCLE_CANCEL = 1;
+
+      BoolVector reached(_res_node_num, false);
+      BoolVector processed(_res_node_num, false);
+      IntVector pred(_res_node_num);
+      for (int i = 0; i != _res_node_num; ++i) {
+        _next_out[i] = _first_out[i];
       }
-      return done;
+      stack_top = -1;
+
+      int cycle_cnt = 0;
+      for (int start = 0; start != _res_node_num; ++start) {
+        if (reached[start]) continue;
+
+        // Start DFS search from this start node
+        pred[start] = -1;
+        int tip = start, v;
+        while (true) {
+          // Check the outgoing arcs of the current tip node
+          reached[tip] = true;
+          LargeCost pi_tip = _pi[tip];
+          int a, last_out = _first_out[tip+1];
+          for (a = _next_out[tip]; a != last_out; ++a) {
+            if (_res_cap[a] > 0) {
+              v = _target[a];
+              if (_cost[a] + pi_tip - _pi[v] < 0) {
+                if (!reached[v]) {
+                  // A new node is reached
+                  reached[v] = true;
+                  pred[v] = tip;
+                  _next_out[tip] = a;
+                  tip = v;
+                  a = _next_out[tip];
+                  last_out = _first_out[tip+1];
+                  break;
+                }
+                else if (!processed[v]) {
+                  // A cycle is found
+                  ++cycle_cnt;
+                  _next_out[tip] = a;
+
+                  // Find the minimum residual capacity along the cycle
+                  Value d, delta = _res_cap[a];
+                  int u, delta_node = tip;
+                  for (u = tip; u != v; ) {
+                    u = pred[u];
+                    d = _res_cap[_next_out[u]];
+                    if (d <= delta) {
+                      delta = d;
+                      delta_node = u;
+                    }
+                  }
+
+                  // Augment along the cycle
+                  _res_cap[a] -= delta;
+                  _res_cap[_reverse[a]] += delta;
+                  for (u = tip; u != v; ) {
+                    u = pred[u];
+                    int ca = _next_out[u];
+                    _res_cap[ca] -= delta;
+                    _res_cap[_reverse[ca]] += delta;
+                  }
+
+                  // Check the maximum number of cycle canceling
+                  if (cycle_cnt >= MAX_CYCLE_CANCEL) {
+                    return false;
+                  }
+
+                  // Roll back search to delta_node
+                  if (delta_node != tip) {
+                    for (u = tip; u != delta_node; u = pred[u]) {
+                      reached[u] = false;
+                    }
+                    tip = delta_node;
+                    a = _next_out[tip] + 1;
+                    last_out = _first_out[tip+1];
+                    break;
+                  }
+                }
+              }
+            }
+          }
+
+          // Step back to the previous node
+          if (a == last_out) {
+            processed[tip] = true;
+            stack[++stack_top] = tip;
+            tip = pred[tip];
+            if (tip < 0) {
+              // Finish DFS from the current start node
+              break;
+            }
+            ++_next_out[tip];
+          }
+        }
+
+      }
+
+      return (cycle_cnt == 0);
     }
 
     // Global potential update heuristic
     void globalUpdate() {
       const int bucket_end = _root + 1;
 
       // Initialize buckets
       for (int r = 0; r != _max_rank; ++r) {
         _buckets[r] = bucket_end;
       }
       Value total_excess = 0;
       int b0 = bucket_end;
       for (int i = 0; i != _res_node_num; ++i) {
         if (_excess[i] < 0) {
           _rank[i] = 0;
           _bucket_next[i] = b0;
           _bucket_prev[b0] = i;
           b0 = i;
         } else {
           total_excess += _excess[i];
           _rank[i] = _max_rank;
         }
       }
       if (total_excess == 0) return;
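Note on the new heuristic above: topologicalSort() cancels any cycles it finds in the admissible network (arcs with residual capacity and negative reduced cost) and returns the remaining nodes in reverse topological order; priceRefinement() then gives every node a rank, an estimate in _epsilon units of how far its potential has to drop, spreads those ranks bucket by bucket from the highest rank down, and lowers _pi accordingly. If every rank stays zero, the current flow is already epsilon-optimal and the whole phase can be skipped. The buckets are the same intrusive doubly linked lists over node indices that globalUpdate() uses. Below is a standalone sketch of that bucket structure with illustrative names (RankBuckets, pushFront, popFront, remove are not part of the patch); it only restates the list manipulation visible in the hunk above.

// Standalone sketch of the rank-bucket structure (illustrative, not library code).
// Each bucket is an intrusive doubly linked list of node indices, so insert,
// remove and pop are all O(1).
#include <cassert>
#include <vector>

struct RankBuckets {
  int sentinel;                  // one-past-last node id, marks an empty bucket
  std::vector<int> head;         // head[r] = first node of rank r, or sentinel
  std::vector<int> next, prev;   // intrusive links, indexed by node id

  RankBuckets(int node_count, int max_rank)
    : sentinel(node_count),
      head(max_rank, node_count),
      next(node_count + 1), prev(node_count + 1) {}

  void pushFront(int r, int u) { // insert u at the front of bucket r
    int first = head[r];
    next[u] = first;
    prev[first] = u;
    head[r] = u;
  }

  int popFront(int r) {          // remove and return the first node of bucket r
    int u = head[r];
    head[r] = next[u];
    return u;
  }

  void remove(int r, int u) {    // unlink u from bucket r (u may be anywhere)
    if (head[r] == u) {
      head[r] = next[u];
    } else {
      int pu = prev[u], nu = next[u];
      next[pu] = nu;
      prev[nu] = pu;
    }
  }
};

int main() {
  RankBuckets b(4, 3);           // 4 nodes, ranks 0..2
  b.pushFront(2, 1);
  b.pushFront(2, 0);             // bucket 2 now holds 0, 1
  b.remove(2, 1);                // unlink node 1
  assert(b.popFront(2) == 0);
  assert(b.head[2] == b.sentinel);
  return 0;
}

In the patch itself, _buckets, _bucket_next and _bucket_prev play the roles of head, next and prev, and _root + 1 serves as the sentinel.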
@@ -1081,64 +1280,67 @@
           }
 
           // Finish search if there are no more active nodes
           if (_excess[u] > 0) {
             total_excess -= _excess[u];
             if (total_excess <= 0) break;
           }
         }
         if (total_excess <= 0) break;
       }
 
       // Relabel nodes
       for (int u = 0; u != _res_node_num; ++u) {
         int k = std::min(_rank[u], r);
         if (k > 0) {
           _pi[u] -= _epsilon * k;
           _next_out[u] = _first_out[u];
         }
       }
     }
 
     /// Execute the algorithm performing augment and relabel operations
     void startAugment(int max_length) {
       // Parameters for heuristics
-      const int EARLY_TERM_EPSILON_LIMIT = 1000;
+      const int PRICE_REFINEMENT_LIMIT = 2;
       const double GLOBAL_UPDATE_FACTOR = 1.0;
       const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
         (_res_node_num + _sup_node_num * _sup_node_num));
       int next_global_update_limit = global_update_skip;
 
       // Perform cost scaling phases
       IntVector path;
       BoolVector path_arc(_res_arc_num, false);
       int relabel_cnt = 0;
+      int eps_phase_cnt = 0;
       for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
         1 : _epsilon / _alpha )
       {
-        // Early termination heuristic
-        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
-          if (earlyTermination()) break;
+        ++eps_phase_cnt;
+
+        // Price refinement heuristic
+        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
+          if (priceRefinement()) continue;
         }
 
         // Initialize current phase
         initPhase();
 
         // Perform partial augment and relabel operations
         while (true) {
           // Select an active node (FIFO selection)
           while (_active_nodes.size() > 0 &&
                  _excess[_active_nodes.front()] <= 0) {
             _active_nodes.pop_front();
           }
           if (_active_nodes.size() == 0) break;
           int start = _active_nodes.front();
 
           // Find an augmenting path from the start node
           int tip = start;
           while (int(path.size()) < max_length && _excess[tip] >= 0) {
             int u;
             LargeCost rc, min_red_cost = std::numeric_limits<LargeCost>::max();
             LargeCost pi_tip = _pi[tip];
             int last_out = _first_out[tip+1];
             for (int a = _next_out[tip]; a != last_out; ++a) {
               if (_res_cap[a] > 0) {
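For the loop this hunk touches: the for-header divides _epsilon by _alpha after every phase and clamps it to 1 for the final phase, and the new eps_phase_cnt counter delays priceRefinement() until the PRICE_REFINEMENT_LIMIT-th phase, when earlier phases have usually left the flow close to optimal already. Below is a minimal sketch of the schedule implied by that header; epsilonSchedule is an illustrative name, not part of the code, and LargeCost stands in for the scaled integer cost type.

// Minimal sketch of the epsilon schedule implied by the scaling loop header:
// epsilon is divided by alpha each phase and clamped to 1 in the last phase.
#include <cstddef>
#include <iostream>
#include <vector>

typedef long long LargeCost;

std::vector<LargeCost> epsilonSchedule(LargeCost epsilon, LargeCost alpha) {
  std::vector<LargeCost> phases;
  for ( ; epsilon >= 1;
        epsilon = (epsilon < alpha && epsilon > 1) ? 1 : epsilon / alpha) {
    phases.push_back(epsilon);   // one cost-scaling phase per value
  }
  return phases;
}

int main() {
  // With alpha = 16 and an initial epsilon of 1000: 1000 62 3 1
  std::vector<LargeCost> s = epsilonSchedule(1000, 16);
  for (std::size_t i = 0; i != s.size(); ++i) std::cout << s[i] << ' ';
  std::cout << '\n';
  return 0;
}

With alpha = 16 and an initial epsilon of 1000 this prints 1000 62 3 1, i.e. roughly log_alpha(initial epsilon) phases in total.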
@@ -1203,65 +1405,68 @@
             _res_cap[pa] -= delta;
             _res_cap[_reverse[pa]] += delta;
             _excess[u] -= delta;
             _excess[v] += delta;
             if (_excess[v] > 0 && _excess[v] <= delta) {
               _active_nodes.push_back(v);
             }
           }
           path.clear();
 
           // Global update heuristic
           if (relabel_cnt >= next_global_update_limit) {
             globalUpdate();
             next_global_update_limit += global_update_skip;
           }
         }
 
       }
 
     }
 
     /// Execute the algorithm performing push and relabel operations
     void startPush() {
       // Parameters for heuristics
-      const int EARLY_TERM_EPSILON_LIMIT = 1000;
+      const int PRICE_REFINEMENT_LIMIT = 2;
       const double GLOBAL_UPDATE_FACTOR = 2.0;
 
       const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
         (_res_node_num + _sup_node_num * _sup_node_num));
       int next_global_update_limit = global_update_skip;
 
       // Perform cost scaling phases
       BoolVector hyper(_res_node_num, false);
       LargeCostVector hyper_cost(_res_node_num);
       int relabel_cnt = 0;
+      int eps_phase_cnt = 0;
       for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
         1 : _epsilon / _alpha )
       {
-        // Early termination heuristic
-        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
-          if (earlyTermination()) break;
+        ++eps_phase_cnt;
+
+        // Price refinement heuristic
+        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
+          if (priceRefinement()) continue;
         }
 
         // Initialize current phase
         initPhase();
 
         // Perform push and relabel operations
         while (_active_nodes.size() > 0) {
           LargeCost min_red_cost, rc, pi_n;
           Value delta;
           int n, t, a, last_out = _res_arc_num;
 
         next_node:
           // Select an active node (FIFO selection)
           n = _active_nodes.front();
           last_out = _first_out[n+1];
           pi_n = _pi[n];
 
           // Perform push operations if there are admissible arcs
           if (_excess[n] > 0) {
             for (a = _next_out[n]; a != last_out; ++a) {
               if (_res_cap[a] > 0 &&
                   _cost[a] + pi_n - _pi[_target[a]] < 0) {
                 delta = std::min(_res_cap[a], _excess[n]);
                 t = _target[a];
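The push loop shown at the end of this hunk scans the arcs of the selected active node n starting at _next_out[n]: an arc is admissible when it still has residual capacity and its reduced cost _cost[a] + _pi[n] - _pi[_target[a]] is negative, and the amount pushed is the smaller of the residual capacity and the node's excess. Below is a self-contained sketch of that step under those assumptions; Residual, admissible and push are illustrative stand-ins for the class members, not the library's API.

// Minimal standalone sketch of a single push on a residual arc
// (illustrative types and names; the real code operates on the class
// members _res_cap, _excess, _cost, _pi and _reverse).
#include <algorithm>
#include <vector>

typedef long long Value;
typedef long long LargeCost;

struct Residual {
  std::vector<Value> res_cap;       // residual capacity per arc
  std::vector<int> target;          // head node per arc
  std::vector<int> reverse;         // index of the opposite arc
  std::vector<Value> excess;        // excess per node
  std::vector<LargeCost> cost;      // cost per arc
  std::vector<LargeCost> pi;        // potential per node
};

// An arc is admissible if it has residual capacity and negative reduced cost.
bool admissible(const Residual &g, int a, int tail) {
  return g.res_cap[a] > 0 &&
         g.cost[a] + g.pi[tail] - g.pi[g.target[a]] < 0;
}

// Push as much excess as the arc allows; returns the pushed amount.
Value push(Residual &g, int a, int tail) {
  Value delta = std::min(g.res_cap[a], g.excess[tail]);
  int head = g.target[a];
  g.res_cap[a] -= delta;
  g.res_cap[g.reverse[a]] += delta;
  g.excess[tail] -= delta;
  g.excess[head] += delta;
  return delta;
}

int main() {
  // Two nodes, one arc 0 -> 1 with capacity 5, plus its reverse arc.
  Residual g;
  g.res_cap.assign(2, 0); g.res_cap[0] = 5;
  g.target.assign(2, 0);  g.target[0] = 1; g.target[1] = 0;
  g.reverse.assign(2, 0); g.reverse[0] = 1; g.reverse[1] = 0;
  g.excess.assign(2, 0);  g.excess[0] = 3;
  g.cost.assign(2, 0);    g.cost[0] = -2; g.cost[1] = 2;
  g.pi.assign(2, 0);
  if (admissible(g, 0, 0)) push(g, 0, 0);   // pushes min(5, 3) = 3
  return 0;
}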