Changeset 910:f3bc4e9b5f3a in lemon for lemon/cost_scaling.h
- Timestamp: 02/20/10 18:39:03 (15 years ago)
- Branch: default
- Phase: public
- File: 1 edited (lemon/cost_scaling.h)
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
lemon/cost_scaling.h
--- r887
+++ r910

@@ -198 +198 @@
 
     typedef std::vector<int> IntVector;
-    typedef std::vector<char> BoolVector;
     typedef std::vector<Value> ValueVector;
     typedef std::vector<Cost> CostVector;
     typedef std::vector<LargeCost> LargeCostVector;
+    typedef std::vector<char> BoolVector;
+    // Note: vector<char> is used instead of vector<bool> for efficiency reasons
 
   private:

@@ -245 +246 @@
     bool _have_lower;
     Value _sum_supply;
+    int _sup_node_num;
 
     // Data structures for storing the digraph

@@ -273 +275 @@
     int _alpha;
 
+    IntVector _buckets;
+    IntVector _bucket_next;
+    IntVector _bucket_prev;
+    IntVector _rank;
+    int _max_rank;
+
     // Data for a StaticDigraph structure
     typedef std::pair<int, int> IntPair;

@@ -803 +811 @@
      }
 
+      _sup_node_num = 0;
+      for (NodeIt n(_graph); n != INVALID; ++n) {
+        if (sup[n] > 0) ++_sup_node_num;
+      }
+
      // Find a feasible flow using Circulation
      Circulation<Digraph, ConstMap<Arc, Value>, ValueArcMap, ValueNodeMap>

@@ -837 +850 @@
      for (int a = _first_out[_root]; a != _res_arc_num; ++a) {
        int ra = _reverse[a];
-        _res_cap[a] = 1;
+        _res_cap[a] = 0;
        _res_cap[ra] = 0;
        _cost[a] = 0;

@@ -851 +864 @@
      // Maximum path length for partial augment
      const int MAX_PATH_LENGTH = 4;
 
+      // Initialize data structures for buckets
+      _max_rank = _alpha * _res_node_num;
+      _buckets.resize(_max_rank);
+      _bucket_next.resize(_res_node_num + 1);
+      _bucket_prev.resize(_res_node_num + 1);
+      _rank.resize(_res_node_num + 1);
+
      // Execute the algorithm
      switch (method) {

@@ -890 +910 @@
        }
      }
+
+    // Initialize a cost scaling phase
+    void initPhase() {
+      // Saturate arcs not satisfying the optimality condition
+      for (int u = 0; u != _res_node_num; ++u) {
+        int last_out = _first_out[u+1];
+        LargeCost pi_u = _pi[u];
+        for (int a = _first_out[u]; a != last_out; ++a) {
+          int v = _target[a];
+          if (_res_cap[a] > 0 && _cost[a] + pi_u - _pi[v] < 0) {
+            Value delta = _res_cap[a];
+            _excess[u] -= delta;
+            _excess[v] += delta;
+            _res_cap[a] = 0;
+            _res_cap[_reverse[a]] += delta;
+          }
+        }
+      }
+
+      // Find active nodes (i.e. nodes with positive excess)
+      for (int u = 0; u != _res_node_num; ++u) {
+        if (_excess[u] > 0) _active_nodes.push_back(u);
+      }
+
+      // Initialize the next arcs
+      for (int u = 0; u != _res_node_num; ++u) {
+        _next_out[u] = _first_out[u];
+      }
+    }
+
+    // Early termination heuristic
+    bool earlyTermination() {
+      const double EARLY_TERM_FACTOR = 3.0;
+
+      // Build a static residual graph
+      _arc_vec.clear();
+      _cost_vec.clear();
+      for (int j = 0; j != _res_arc_num; ++j) {
+        if (_res_cap[j] > 0) {
+          _arc_vec.push_back(IntPair(_source[j], _target[j]));
+          _cost_vec.push_back(_cost[j] + 1);
+        }
+      }
+      _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
+
+      // Run Bellman-Ford algorithm to check if the current flow is optimal
+      BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
+      bf.init(0);
+      bool done = false;
+      int K = int(EARLY_TERM_FACTOR * std::sqrt(double(_res_node_num)));
+      for (int i = 0; i < K && !done; ++i) {
+        done = bf.processNextWeakRound();
+      }
+      return done;
+    }
+
+    // Global potential update heuristic
+    void globalUpdate() {
+      int bucket_end = _root + 1;
+
+      // Initialize buckets
+      for (int r = 0; r != _max_rank; ++r) {
+        _buckets[r] = bucket_end;
+      }
+      Value total_excess = 0;
+      for (int i = 0; i != _res_node_num; ++i) {
+        if (_excess[i] < 0) {
+          _rank[i] = 0;
+          _bucket_next[i] = _buckets[0];
+          _bucket_prev[_buckets[0]] = i;
+          _buckets[0] = i;
+        } else {
+          total_excess += _excess[i];
+          _rank[i] = _max_rank;
+        }
+      }
+      if (total_excess == 0) return;
+
+      // Search the buckets
+      int r = 0;
+      for ( ; r != _max_rank; ++r) {
+        while (_buckets[r] != bucket_end) {
+          // Remove the first node from the current bucket
+          int u = _buckets[r];
+          _buckets[r] = _bucket_next[u];
+
+          // Search the incomming arcs of u
+          LargeCost pi_u = _pi[u];
+          int last_out = _first_out[u+1];
+          for (int a = _first_out[u]; a != last_out; ++a) {
+            int ra = _reverse[a];
+            if (_res_cap[ra] > 0) {
+              int v = _source[ra];
+              int old_rank_v = _rank[v];
+              if (r < old_rank_v) {
+                // Compute the new rank of v
+                LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
+                int new_rank_v = old_rank_v;
+                if (nrc < LargeCost(_max_rank))
+                  new_rank_v = r + 1 + int(nrc);
+
+                // Change the rank of v
+                if (new_rank_v < old_rank_v) {
+                  _rank[v] = new_rank_v;
+                  _next_out[v] = _first_out[v];
+
+                  // Remove v from its old bucket
+                  if (old_rank_v < _max_rank) {
+                    if (_buckets[old_rank_v] == v) {
+                      _buckets[old_rank_v] = _bucket_next[v];
+                    } else {
+                      _bucket_next[_bucket_prev[v]] = _bucket_next[v];
+                      _bucket_prev[_bucket_next[v]] = _bucket_prev[v];
+                    }
+                  }
+
+                  // Insert v to its new bucket
+                  _bucket_next[v] = _buckets[new_rank_v];
+                  _bucket_prev[_buckets[new_rank_v]] = v;
+                  _buckets[new_rank_v] = v;
+                }
+              }
+            }
+          }
+
+          // Finish search if there are no more active nodes
+          if (_excess[u] > 0) {
+            total_excess -= _excess[u];
+            if (total_excess <= 0) break;
+          }
+        }
+        if (total_excess <= 0) break;
+      }
+
+      // Relabel nodes
+      for (int u = 0; u != _res_node_num; ++u) {
+        int k = std::min(_rank[u], r);
+        if (k > 0) {
+          _pi[u] -= _epsilon * k;
+          _next_out[u] = _first_out[u];
+        }
+      }
+    }
 
     /// Execute the algorithm performing augment and relabel operations
     void startAugment(int max_length = std::numeric_limits<int>::max()) {
       // Paramters for heuristics
-      const int BF_HEURISTIC_EPSILON_BOUND = 1000;
-      const int BF_HEURISTIC_BOUND_FACTOR = 3;
+      const int EARLY_TERM_EPSILON_LIMIT = 1000;
+      const double GLOBAL_UPDATE_FACTOR = 3.0;
+
+      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+        (_res_node_num + _sup_node_num * _sup_node_num));
+      int next_update_limit = global_update_freq;
+
+      int relabel_cnt = 0;
 
       // Perform cost scaling phases
-      IntVector pred_arc(_res_node_num);
-      std::vector<int> path_nodes;
+      std::vector<int> path;
       for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                              1 : _epsilon / _alpha )
       {
-        // "Early Termination" heuristic: use Bellman-Ford algorithm
-        // to check if the current flow is optimal
-        if (_epsilon <= BF_HEURISTIC_EPSILON_BOUND) {
-          _arc_vec.clear();
-          _cost_vec.clear();
-          for (int j = 0; j != _res_arc_num; ++j) {
-            if (_res_cap[j] > 0) {
-              _arc_vec.push_back(IntPair(_source[j], _target[j]));
-              _cost_vec.push_back(_cost[j] + 1);
-            }
-          }
-          _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
-          BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
-          bf.init(0);
-          bool done = false;
-          int K = int(BF_HEURISTIC_BOUND_FACTOR * sqrt(_res_node_num));
-          for (int i = 0; i < K && !done; ++i)
-            done = bf.processNextWeakRound();
-          if (done) break;
-        }
-
-        // Saturate arcs not satisfying the optimality condition
-        for (int a = 0; a != _res_arc_num; ++a) {
-          if (_res_cap[a] > 0 &&
-              _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
-            Value delta = _res_cap[a];
-            _excess[_source[a]] -= delta;
-            _excess[_target[a]] += delta;
-            _res_cap[a] = 0;
-            _res_cap[_reverse[a]] += delta;
-          }
+        // Early termination heuristic
+        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
+          if (earlyTermination()) break;
         }
 
-        // Find active nodes (i.e. nodes with positive excess)
-        for (int u = 0; u != _res_node_num; ++u) {
-          if (_excess[u] > 0) _active_nodes.push_back(u);
-        }
-
-        // Initialize the next arcs
-        for (int u = 0; u != _res_node_num; ++u) {
-          _next_out[u] = _first_out[u];
-        }
-
+        // Initialize current phase
+        initPhase();
+
        // Perform partial augment and relabel operations
        while (true) {

@@ -956 +1088 @@
          if (_active_nodes.size() == 0) break;
          int start = _active_nodes.front();
-          path_nodes.clear();
-          path_nodes.push_back(start);
 
          // Find an augmenting path from the start node
+          path.clear();
          int tip = start;
-          while (_excess[tip] >= 0 &&
-                 int(path_nodes.size()) <= max_length) {
+          while (_excess[tip] >= 0 && int(path.size()) < max_length) {
            int u;
-            LargeCost min_red_cost, rc;
-            int last_out = _sum_supply < 0 ?
-              _first_out[tip+1] : _first_out[tip+1] - 1;
+            LargeCost min_red_cost, rc, pi_tip = _pi[tip];
+            int last_out = _first_out[tip+1];
            for (int a = _next_out[tip]; a != last_out; ++a) {
-              if (_res_cap[a] > 0 &&
-                  _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
-                u = _target[a];
-                pred_arc[u] = a;
+              u = _target[a];
+              if (_res_cap[a] > 0 && _cost[a] + pi_tip - _pi[u] < 0) {
+                path.push_back(a);
                _next_out[tip] = a;
                tip = u;
-                path_nodes.push_back(tip);
                goto next_step;
              }

@@ -980 +1107 @@
 
            // Relabel tip node
-            min_red_cost = std::numeric_limits<LargeCost>::max() / 2;
+            min_red_cost = std::numeric_limits<LargeCost>::max();
+            if (tip != start) {
+              int ra = _reverse[path.back()];
+              min_red_cost = _cost[ra] + pi_tip - _pi[_target[ra]];
+            }
            for (int a = _first_out[tip]; a != last_out; ++a) {
-              rc = _cost[a] + _pi[_source[a]] - _pi[_target[a]];
+              rc = _cost[a] + pi_tip - _pi[_target[a]];
              if (_res_cap[a] > 0 && rc < min_red_cost) {
                min_red_cost = rc;

@@ -988 +1119 @@
            }
            _pi[tip] -= min_red_cost + _epsilon;
-
-            // Reset the next arc of tip
            _next_out[tip] = _first_out[tip];
+            ++relabel_cnt;
 
            // Step back
            if (tip != start) {
-              path_nodes.pop_back();
-              tip = path_nodes.back();
+              tip = _source[path.back()];
+              path.pop_back();
            }
 

@@ -1003 +1133 @@
          // Augment along the found path (as much flow as possible)
          Value delta;
-          int u, v = path_nodes.front(), pa;
-          for (int i = 1; i < int(path_nodes.size()); ++i) {
+          int pa, u, v = start;
+          for (int i = 0; i != int(path.size()); ++i) {
+            pa = path[i];
            u = v;
-            v = path_nodes[i];
-            pa = pred_arc[v];
+            v = _target[pa];
            delta = std::min(_res_cap[pa], _excess[u]);
            _res_cap[pa] -= delta;

@@ -1016 +1146 @@
              _active_nodes.push_back(v);
          }
+
+          // Global update heuristic
+          if (relabel_cnt >= next_update_limit) {
+            globalUpdate();
+            next_update_limit += global_update_freq;
+          }
        }
      }

@@ -1023 +1159 @@
    void startPush() {
      // Paramters for heuristics
-      const int BF_HEURISTIC_EPSILON_BOUND = 1000;
-      const int BF_HEURISTIC_BOUND_FACTOR = 3;
-
+      const int EARLY_TERM_EPSILON_LIMIT = 1000;
+      const double GLOBAL_UPDATE_FACTOR = 2.0;
+
+      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+        (_res_node_num + _sup_node_num * _sup_node_num));
+      int next_update_limit = global_update_freq;
+
+      int relabel_cnt = 0;
+
      // Perform cost scaling phases
      BoolVector hyper(_res_node_num, false);
+      LargeCostVector hyper_cost(_res_node_num);
      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                             1 : _epsilon / _alpha )
      {
-        // "Early Termination" heuristic: use Bellman-Ford algorithm
-        // to check if the current flow is optimal
-        if (_epsilon <= BF_HEURISTIC_EPSILON_BOUND) {
-          _arc_vec.clear();
-          _cost_vec.clear();
-          for (int j = 0; j != _res_arc_num; ++j) {
-            if (_res_cap[j] > 0) {
-              _arc_vec.push_back(IntPair(_source[j], _target[j]));
-              _cost_vec.push_back(_cost[j] + 1);
-            }
-          }
-          _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
-          BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
-          bf.init(0);
-          bool done = false;
-          int K = int(BF_HEURISTIC_BOUND_FACTOR * sqrt(_res_node_num));
-          for (int i = 0; i < K && !done; ++i)
-            done = bf.processNextWeakRound();
-          if (done) break;
-        }
-
-        // Saturate arcs not satisfying the optimality condition
-        for (int a = 0; a != _res_arc_num; ++a) {
-          if (_res_cap[a] > 0 &&
-              _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
-            Value delta = _res_cap[a];
-            _excess[_source[a]] -= delta;
-            _excess[_target[a]] += delta;
-            _res_cap[a] = 0;
-            _res_cap[_reverse[a]] += delta;
-          }
-        }
-
-        // Find active nodes (i.e. nodes with positive excess)
-        for (int u = 0; u != _res_node_num; ++u) {
-          if (_excess[u] > 0) _active_nodes.push_back(u);
-        }
-
-        // Initialize the next arcs
-        for (int u = 0; u != _res_node_num; ++u) {
-          _next_out[u] = _first_out[u];
-        }
+        // Early termination heuristic
+        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
+          if (earlyTermination()) break;
+        }
+
+        // Initialize current phase
+        initPhase();
 
        // Perform push and relabel operations
        while (_active_nodes.size() > 0) {
-          LargeCost min_red_cost, rc;
+          LargeCost min_red_cost, rc, pi_n;
          Value delta;
          int n, t, a, last_out = _res_arc_num;
 
+        next_node:
          // Select an active node (FIFO selection)
-        next_node:
          n = _active_nodes.front();
-          last_out = _sum_supply < 0 ?
-            _first_out[n+1] : _first_out[n+1] - 1;
+          last_out = _first_out[n+1];
+          pi_n = _pi[n];
 
          // Perform push operations if there are admissible arcs
          if (_excess[n] > 0) {
            for (a = _next_out[n]; a != last_out; ++a) {
              if (_res_cap[a] > 0 &&
-                  _cost[a] + _pi[_source[a]] - _pi[_target[a]] < 0) {
+                  _cost[a] + pi_n - _pi[_target[a]] < 0) {
                delta = std::min(_res_cap[a], _excess[n]);
                t = _target[a];

@@ -1097 +1204 @@
                // Push-look-ahead heuristic
                Value ahead = -_excess[t];
-                int last_out_t = _sum_supply < 0 ?
-                  _first_out[t+1] : _first_out[t+1] - 1;
+                int last_out_t = _first_out[t+1];
+                LargeCost pi_t = _pi[t];
                for (int ta = _next_out[t]; ta != last_out_t; ++ta) {
                  if (_res_cap[ta] > 0 &&
-                      _cost[ta] + _pi[_source[ta]] - _pi[_target[ta]] < 0)
+                      _cost[ta] + pi_t - _pi[_target[ta]] < 0)
                    ahead += _res_cap[ta];
                  if (ahead >= delta) break;

@@ -1108 +1215 @@
 
                // Push flow along the arc
-                if (ahead < delta) {
+                if (ahead < delta && !hyper[t]) {
                  _res_cap[a] -= ahead;
                  _res_cap[_reverse[a]] += ahead;

@@ -1115 +1222 @@
                  _active_nodes.push_front(t);
                  hyper[t] = true;
+                  hyper_cost[t] = _cost[a] + pi_n - pi_t;
                  _next_out[n] = a;
                  goto next_node;

@@ -1137 +1245 @@
          // Relabel the node if it is still active (or hyper)
          if (_excess[n] > 0 || hyper[n]) {
-            min_red_cost = std::numeric_limits<LargeCost>::max() / 2;
+            min_red_cost = hyper[n] ? -hyper_cost[n] :
+              std::numeric_limits<LargeCost>::max();
            for (int a = _first_out[n]; a != last_out; ++a) {
-              rc = _cost[a] + _pi[_source[a]] - _pi[_target[a]];
+              rc = _cost[a] + pi_n - _pi[_target[a]];
              if (_res_cap[a] > 0 && rc < min_red_cost) {
                min_red_cost = rc;

@@ -1145 +1254 @@
            }
            _pi[n] -= min_red_cost + _epsilon;
+            _next_out[n] = _first_out[n];
            hyper[n] = false;
-
-            // Reset the next arc
-            _next_out[n] = _first_out[n];
+            ++relabel_cnt;
          }

@@ -1158 +1266 @@
            _active_nodes.pop_front();
          }
+
+          // Global update heuristic
+          if (relabel_cnt >= next_update_limit) {
+            globalUpdate();
+            for (int u = 0; u != _res_node_num; ++u)
+              hyper[u] = false;
+            next_update_limit += global_update_freq;
+          }
        }
      }
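For orientation, below is a minimal usage sketch of the class this file defines, written against the named-parameter style LEMON uses for its min-cost-flow classes. It is not part of this changeset: the graph, maps and values are illustrative, and the exact method names and Method/ProblemType enumerators should be checked against the cost_scaling.h revision you are using.

  // Hypothetical usage sketch of lemon::CostScaling (assumed API, not taken
  // from changeset 910): upperMap/costMap/supplyMap + run()/totalCost()/flow().
  #include <iostream>
  #include <lemon/list_graph.h>
  #include <lemon/cost_scaling.h>

  using namespace lemon;

  int main() {
    ListDigraph g;
    ListDigraph::Node s = g.addNode();
    ListDigraph::Node t = g.addNode();
    ListDigraph::Arc  a = g.addArc(s, t);

    ListDigraph::ArcMap<int> capacity(g), cost(g);
    ListDigraph::NodeMap<int> supply(g);
    capacity[a] = 10;   // upper bound on the arc flow
    cost[a] = 3;        // cost per unit of flow
    supply[s] = 5;      // source supplies 5 units
    supply[t] = -5;     // sink demands 5 units

    CostScaling<ListDigraph> cs(g);
    cs.upperMap(capacity).costMap(cost).supplyMap(supply);

    // run() selects between the internal startPush()/startAugment() routines
    // modified in this changeset through its Method parameter (assumed to
    // default to the partial-augment variant).
    if (cs.run() == CostScaling<ListDigraph>::OPTIMAL) {
      std::cout << "minimum cost: " << cs.totalCost() << "\n"
                << "flow on arc a: " << cs.flow(a) << std::endl;
    }
    return 0;
  }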