Changes in / [970:bd523dbc7033:971:22201ee8e437] in lemon-main
Files: 7 added, 19 edited
CMakeLists.txt
r964 → r966

      SET(LEMON_HAVE_LONG_LONG ${HAVE_LONG_LONG})

-     INCLUDE(FindPythonInterp)
      ENABLE_TESTING()
…
      ADD_SUBDIRECTORY(lemon)
      IF(${CMAKE_SOURCE_DIR} STREQUAL ${PROJECT_SOURCE_DIR})
+       ADD_SUBDIRECTORY(contrib)
        ADD_SUBDIRECTORY(demo)
        ADD_SUBDIRECTORY(tools)
doc/Doxyfile.in
r964 → r966

      "@abs_top_srcdir@/lemon/concepts" \
      "@abs_top_srcdir@/demo" \
+     "@abs_top_srcdir@/contrib" \
      "@abs_top_srcdir@/tools" \
      "@abs_top_srcdir@/test/test_tools.h" \
doc/coding_style.dox
r440 → r919

      \subsection pri-loc-var Private member variables

-     Private member variables should start with underscore
+     Private member variables should start with underscore.

      \code
-     _start_with_underscores
+     _start_with_underscore
      \endcode
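For illustration, a minimal sketch of this naming convention; the class and member names below are made up:

    // Illustrative only: a class whose private members follow the convention.
    class Circle {
    public:
      explicit Circle(double radius) : _radius(radius) {}
      double area() const { return 3.141592653589793 * _radius * _radius; }
    private:
      double _radius;   // private member variable: starts with an underscore
    };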
doc/dirs.dox
r440 → r925

      documentation.
      */
+
+     /**
+     \dir contrib
+     \brief Directory for user contributed source codes.
+
+     You can place your own C++ code using LEMON into this directory, which
+     will compile to an executable along with LEMON when you build the
+     library. This is probably the easiest way of compiling short to medium
+     codes, since it requires neither a system-wide LEMON installation nor
+     adding several paths to the compiler.
+
+     Please have a look at <tt>contrib/CMakeLists.txt</tt> for
+     instructions on how to add your own files into the build process. */

      /**
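As a rough sketch, a self-contained program of the kind that could be dropped into contrib/. The file name hello_lemon.cc is hypothetical, and the file still has to be registered in contrib/CMakeLists.txt as described above:

    // contrib/hello_lemon.cc -- hypothetical example file
    #include <iostream>
    #include <lemon/list_graph.h>

    int main() {
      lemon::ListGraph g;
      lemon::ListGraph::Node u = g.addNode();
      lemon::ListGraph::Node v = g.addNode();
      g.addEdge(u, v);
      std::cout << "nodes: " << countNodes(g)
                << ", edges: " << countEdges(g) << std::endl;
      return 0;
    }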
doc/groups.dox
r879 → r919

      strongly polynomial \ref klein67primal, \ref goldberg89cyclecanceling.

-     In general NetworkSimplex is the most efficient implementation,
-     but in special cases other algorithms could be faster.
+     In general, \ref NetworkSimplex and \ref CostScaling are the most efficient
+     implementations, but the other two algorithms could be faster in special cases.
      For example, if the total supply and/or capacities are rather small,
-     CapacityScaling is usually the fastest algorithm (without effective scaling).
+     \ref CapacityScaling is usually the fastest algorithm (without effective scaling).
      */
…
      \ref dasdan98minmeancycle.

-     In practice, the \ref HowardMmc "Howard" algorithm proved to be by far the
+     In practice, the \ref HowardMmc "Howard" algorithm turned out to be by far the
      most efficient one, though the best known theoretical bound on its running
      time is exponential.
…
      /**
-     @defgroup planar Planarity Embedding and Drawing
+     @defgroup planar Planar Embedding and Drawing
      @ingroup algs
      \brief Algorithms for planarity checking, embedding and drawing
…
      /**
-     @defgroup approx Approximation Algorithms
+     @defgroup approx_algs Approximation Algorithms
      @ingroup algs
      \brief Approximation algorithms.
…
      This group contains the approximation and heuristic algorithms
      implemented in LEMON.
+
+     <b>Maximum Clique Problem</b>
+     - \ref GrossoLocatelliPullanMc An efficient heuristic algorithm of
+       Grosso, Locatelli, and Pullan.
      */
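The first hunk above recommends choosing between the min-cost flow implementations. Since they share the same parameter-setting interface, switching solvers is a one-line change; a small sketch (the digraph, capacities, costs and supply values are made up for illustration):

    #include <iostream>
    #include <lemon/smart_graph.h>
    #include <lemon/network_simplex.h>
    #include <lemon/cost_scaling.h>

    using namespace lemon;

    int main() {
      SmartDigraph g;
      SmartDigraph::Node s = g.addNode(), a = g.addNode(), t = g.addNode();
      SmartDigraph::ArcMap<int> cap(g), cost(g);
      SmartDigraph::Arc sa = g.addArc(s, a), at = g.addArc(a, t), st = g.addArc(s, t);
      cap[sa] = 5; cost[sa] = 1;
      cap[at] = 5; cost[at] = 1;
      cap[st] = 3; cost[st] = 3;

      // The solver classes share the same setup calls, so the implementation
      // can be swapped without touching the rest of the code.
      NetworkSimplex<SmartDigraph> ns(g);
      ns.upperMap(cap).costMap(cost).stSupply(s, t, 7);
      if (ns.run() == NetworkSimplex<SmartDigraph>::OPTIMAL)
        std::cout << "network simplex cost: " << ns.totalCost() << std::endl;

      CostScaling<SmartDigraph> cs(g);
      cs.upperMap(cap).costMap(cost).stSupply(s, t, 7);
      if (cs.run() == CostScaling<SmartDigraph>::OPTIMAL)
        std::cout << "cost scaling cost:    " << cs.totalCost() << std::endl;
      return 0;
    }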
doc/references.bib
r755 → r904

      address = {Dublin, Ireland},
      year = 1991,
-     month = sep,
+     month = sep
      }
+
+     %%%%% Other algorithms %%%%%
+
+     @article{grosso08maxclique,
+       author  = {Andrea Grosso and Marco Locatelli and Wayne Pullan},
+       title   = {Simple ingredients leading to very efficient
+                  heuristics for the maximum clique problem},
+       journal = {Journal of Heuristics},
+       year    = 2008,
+       volume  = 14,
+       number  = 6,
+       pages   = {587--612}
+     }
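The heuristic of this new reference is implemented by the GrossoLocatelliPullanMc class mentioned in groups.dox above and added to the build below. A rough usage sketch; the member names run(), cliqueSize() and cliqueMap() are assumptions about the class's query interface and should be checked against lemon/grosso_locatelli_pullan_mc.h, and the graph is made up:

    #include <lemon/list_graph.h>
    #include <lemon/grosso_locatelli_pullan_mc.h>

    using namespace lemon;

    int main() {
      ListGraph g;
      ListGraph::Node n1 = g.addNode(), n2 = g.addNode(),
                      n3 = g.addNode(), n4 = g.addNode();
      g.addEdge(n1, n2); g.addEdge(n2, n3); g.addEdge(n1, n3);  // a triangle
      g.addEdge(n3, n4);                                        // plus a pendant node

      GrossoLocatelliPullanMc<ListGraph> mc(g);
      mc.run();                          // iterated local search heuristic (assumed default parameters)
      int size = mc.cliqueSize();        // size of the best clique found (assumed accessor)
      ListGraph::NodeMap<bool> clique(g);
      mc.cliqueMap(clique);              // characteristic map of that clique (assumed accessor)
      (void)size;
      return 0;
    }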
lemon/Makefile.am
r964 → r966

      lemon/graph_to_eps.h \
      lemon/grid_graph.h \
+     lemon/grosso_locatelli_pullan_mc.h \
      lemon/hartmann_orlin_mmc.h \
      lemon/howard_mmc.h \
…
      lemon/math.h \
      lemon/min_cost_arborescence.h \
+     lemon/max_cardinality_search.h \
+     lemon/nagamochi_ibaraki.h \
      lemon/nauty_reader.h \
      lemon/network_simplex.h \
lemon/capacity_scaling.h
r877 → r922

      /// consider to use the named template parameters instead.
      ///
-     /// \warning Both number types must be signed and all input data must
+     /// \warning Both \c V and \c C must be signed number types.
+     /// \warning All input data (capacities, supply values, and costs) must
      /// be integer.
-     /// \warning This algorithm does not support negative costs for such
-     /// arcs that have infinite upper bound.
+     /// \warning This algorithm does not support negative costs for
+     /// arcs having infinite upper bound.
      #ifdef DOXYGEN
      template <typename GR, typename V, typename C, typename TR>
…
      ///
      /// Using this function has the same effect as using \ref supplyMap()
-     /// with such a map in which \c k is assigned to \c s, \c -k is
+     /// with a map in which \c k is assigned to \c s, \c -k is
      /// assigned to \c t and all other nodes have zero supply value.
      ///
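To make the supplyMap()/stSupply() equivalence concrete, a small sketch; the instance and values are only illustrative:

    #include <lemon/smart_graph.h>
    #include <lemon/capacity_scaling.h>

    using namespace lemon;

    int main() {
      SmartDigraph g;
      SmartDigraph::Node s = g.addNode(), t = g.addNode();
      SmartDigraph::Arc a = g.addArc(s, t);
      SmartDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 10; cost[a] = 2;

      CapacityScaling<SmartDigraph> cs(g);
      // stSupply(s, t, 4) behaves like a supply map assigning +4 to s,
      // -4 to t and 0 to every other node.
      cs.upperMap(cap).costMap(cost).stSupply(s, t, 4);
      cs.run();
      // cs.totalCost() and cs.flow(a) query the solution.
      return 0;
    }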
lemon/core.h
r964 → r966

      }
+
+     /// \brief Check whether a graph is undirected.
+     ///
+     /// This function returns \c true if the given graph is undirected.
+     #ifdef DOXYGEN
+     template <typename GR>
+     bool undirected(const GR& g) { return false; }
+     #else
+     template <typename GR>
+     typename enable_if<UndirectedTagIndicator<GR>, bool>::type
+     undirected(const GR&) {
+       return true;
+     }
+     template <typename GR>
+     typename disable_if<UndirectedTagIndicator<GR>, bool>::type
+     undirected(const GR&) {
+       return false;
+     }
+     #endif

      /// \brief Class to copy a digraph.
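A quick sketch of how the newly added undirected() function behaves for a directed and an undirected graph type (the graph instances are illustrative):

    #include <cassert>
    #include <lemon/list_graph.h>

    using namespace lemon;

    int main() {
      ListDigraph d;
      ListGraph g;
      assert(!undirected(d));   // digraph types report false
      assert(undirected(g));    // undirected graph types report true
      return 0;
    }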
lemon/cost_scaling.h
r931 → r938

Documentation changes:

+     /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
+     /// implementations available in LEMON for this problem.
…
-     /// \warning Both number types must be signed and all input data must
+     /// \warning Both \c V and \c C must be signed number types.
+     /// \warning All input data (capacities, supply values, and costs) must
      /// be integer.
-     /// \warning This algorithm does not support negative costs for such
-     /// arcs that have infinite upper bound.
+     /// \warning This algorithm does not support negative costs for
+     /// arcs having infinite upper bound.
…
      /// By default, the so called \ref PARTIAL_AUGMENT
-     /// "Partial Augment-Relabel" method is used, which proved to be
+     /// "Partial Augment-Relabel" method is used, which turned out to be
      /// the most efficient and the most robust on various test inputs.
…
      /// Using this function has the same effect as using \ref supplyMap()
-     /// with such a map in which \c k is assigned to \c s, \c -k is
+     /// with a map in which \c k is assigned to \c s, \c -k is
      /// assigned to \c t and all other nodes have zero supply value.
…
-     /// \param factor The cost scaling factor. It must be larger than one.
+     /// \param factor The cost scaling factor. It must be at least two.
…
-     ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 8) {
+     ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 16) {
+       LEMON_ASSERT(factor >= 2, "The scaling factor must be at least 2");
        _alpha = factor;
        ProblemType pt = init();

The reset() documentation is rewritten as well: reset() now clears the
internal data structures in addition to the parameters, resetParams() is
sufficient when only the parameters change between run() calls, and reset()
must be used if the underlying digraph was modified after construction or
after the last reset() call.

Implementation changes (condensed):

- The permanently stored StaticDigraph members (_sgr, _arc_vec, _cost_vec,
  _cost_map, _pi_map) and the LargeCostNodeMap typedef are removed; a local
  StaticDigraph is built only where Bellman-Ford is actually needed.
- The bucket data structures are now set up at the end of init() instead of
  in start(), and MAX_PATH_LENGTH is renamed MAX_PARTIAL_PATH_LENGTH.
- After the scaling loop, the node potentials (dual solution) are obtained by
  scaling _pi back directly; Bellman-Ford on the residual graph is run only
  if the scaled potentials are not yet optimal, and the potentials are then
  shifted to satisfy the GEQ-type optimality conditions.
- The earlyTermination() heuristic is replaced by a priceRefinement()
  heuristic with a topologicalSort() helper that cancels admissible cycles,
  sorts the acyclic admissible network topologically, ranks the nodes into
  buckets, and refines the potentials bucket by bucket.
- startAugment() uses a path_arc flag vector to stop the path search as soon
  as a cycle is found, tracks the minimum reduced cost while scanning the
  outgoing arcs, and switches from the early-termination parameters to
  PRICE_REFINEMENT_LIMIT = 2 and GLOBAL_UPDATE_FACTOR = 1.0.
- startPush() gets the same heuristic replacement, and globalUpdate() uses
  slightly restructured doubly linked bucket updates.
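A short sketch of the changed run() signature, using an explicit method and the new default scaling factor; the tiny instance is made up, and only the PUSH and PARTIAL_AUGMENT method names visible in this diff are used:

    #include <lemon/smart_graph.h>
    #include <lemon/cost_scaling.h>

    using namespace lemon;

    int main() {
      SmartDigraph g;
      SmartDigraph::Node s = g.addNode(), t = g.addNode();
      SmartDigraph::Arc a = g.addArc(s, t);
      SmartDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 8; cost[a] = 1;

      CostScaling<SmartDigraph> cs(g);
      cs.upperMap(cap).costMap(cost).stSupply(s, t, 5);

      // Default method with the new default scaling factor (16);
      // after this change the factor must be at least 2.
      cs.run(CostScaling<SmartDigraph>::PARTIAL_AUGMENT, 16);

      // resetParams() keeps the copied digraph but clears the parameters,
      // so the solver can be reused with new data; reset() would also
      // rebuild the internal graph copy.
      cs.resetParams();
      cs.upperMap(cap).costMap(cost).stSupply(s, t, 3);
      cs.run(CostScaling<SmartDigraph>::PUSH);
      return 0;
    }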
lemon/cycle_canceling.h
r877 → r922

      /// algorithm. By default, it is the same as \c V.
      ///
-     /// \warning Both number types must be signed and all input data must
+     /// \warning Both \c V and \c C must be signed number types.
+     /// \warning All input data (capacities, supply values, and costs) must
      /// be integer.
-     /// \warning This algorithm does not support negative costs for such
-     /// arcs that have infinite upper bound.
+     /// \warning This algorithm does not support negative costs for
+     /// arcs having infinite upper bound.
      ///
      /// \note For more information about the three available methods,
…
      /// \ref CycleCanceling provides three different cycle-canceling
      /// methods. By default, \ref CANCEL_AND_TIGHTEN "Cancel and Tighten"
-     /// is used, which proved to be the most efficient and the most robust
-     /// on various test inputs.
+     /// is used, which is by far the most efficient and the most robust.
      /// However, the other methods can be selected using the \ref run()
      /// function with the proper parameter.
…
      ///
      /// Using this function has the same effect as using \ref supplyMap()
-     /// with such a map in which \c k is assigned to \c s, \c -k is
+     /// with a map in which \c k is assigned to \c s, \c -k is
      /// assigned to \c t and all other nodes have zero supply value.
      ///
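A brief sketch of selecting the cycle-canceling method through run(); the instance is made up, CANCEL_AND_TIGHTEN is the default named in the diff, and SIMPLE_CYCLE_CANCELING in the comment is an assumed name for one of the other methods:

    #include <lemon/smart_graph.h>
    #include <lemon/cycle_canceling.h>

    using namespace lemon;

    int main() {
      SmartDigraph g;
      SmartDigraph::Node s = g.addNode(), t = g.addNode();
      SmartDigraph::Arc a = g.addArc(s, t);
      SmartDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 6; cost[a] = 4;

      CycleCanceling<SmartDigraph> cc(g);
      cc.upperMap(cap).costMap(cost).stSupply(s, t, 2);
      cc.run();   // uses the default CANCEL_AND_TIGHTEN method
      // Another method can be passed to run(), e.g. (assumed enum name):
      // cc.run(CycleCanceling<SmartDigraph>::SIMPLE_CYCLE_CANCELING);
      return 0;
    }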
lemon/euler.h
r877 → r919

      ///Euler tour iterator for digraphs.

-     /// \ingroup graph_prop
+     /// \ingroup graph_properties
      ///This iterator provides an Euler tour (Eulerian circuit) of a \e directed
      ///graph (if there exists) and it converts to the \c Arc type of the digraph.
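A rough sketch of how the iterator documented here is typically used; it assumes the usual idioms of lemon/euler.h (an eulerian() test function and comparison of DiEulerIt with INVALID), which should be verified against the header, and the three-node cycle is made up:

    #include <lemon/list_graph.h>
    #include <lemon/euler.h>

    using namespace lemon;

    int main() {
      ListDigraph g;
      ListDigraph::Node a = g.addNode(), b = g.addNode(), c = g.addNode();
      g.addArc(a, b); g.addArc(b, c); g.addArc(c, a);   // a directed cycle

      if (eulerian(g)) {                                // the digraph has an Euler tour
        for (DiEulerIt<ListDigraph> e(g); e != INVALID; ++e) {
          ListDigraph::Arc current = e;                 // the iterator converts to Arc
          (void)current;
        }
      }
      return 0;
    }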
lemon/hao_orlin.h
r877 → r915

      /// preflow push-relabel algorithm. Our implementation calculates
      /// the minimum cut in \f$ O(n^2\sqrt{m}) \f$ time (we use the
-     /// highest-label rule), or in \f$ O(nm) \f$ for unit capacities. The
-     /// purpose of such algorithm is e.g. testing network reliability.
+     /// highest-label rule), or in \f$ O(nm) \f$ for unit capacities. A notable
+     /// use of this algorithm is testing network reliability.
      ///
      /// For an undirected graph you can run just the first phase of the
…
      /// source-side (i.e. a set \f$ X\subsetneq V \f$ with
      /// \f$ source \in X \f$ and minimal outgoing capacity).
+     /// It updates the stored cut if (and only if) the newly found one
+     /// is better.
      ///
      /// \pre \ref init() must be called before using this function.
…
      /// sink-side (i.e. a set \f$ X\subsetneq V \f$ with
      /// \f$ source \notin X \f$ and minimal outgoing capacity).
+     /// It updates the stored cut if (and only if) the newly found one
+     /// is better.
      ///
      /// \pre \ref init() must be called before using this function.
…
      /// \brief Run the algorithm.
      ///
-     /// This function runs the algorithm. It finds nodes \c source and
-     /// \c target arbitrarily and then calls \ref init(), \ref calculateOut()
-     /// and \ref calculateIn().
+     /// This function runs the algorithm. It chooses a source node,
+     /// then calls \ref init(), \ref calculateOut()
+     /// and \ref calculateIn().
      void run() {
…
      /// \brief Run the algorithm.
      ///
-     /// This function runs the algorithm. It uses the given \c source node,
-     /// finds a proper \c target node and then calls the \ref init(),
-     /// \ref calculateOut() and \ref calculateIn().
+     /// This function runs the algorithm. It calls \ref init(),
+     /// \ref calculateOut() and \ref calculateIn() with the given
+     /// source node.
      void run(const Node& s) {
        init(s);
…
      /// \brief Return the value of the minimum cut.
      ///
-     /// This function returns the value of the minimum cut.
+     /// This function returns the value of the best cut found by the
+     /// previously called \ref run(), \ref calculateOut() or \ref
+     /// calculateIn().
      ///
      /// \pre \ref run(), \ref calculateOut() or \ref calculateIn()
…
      /// \brief Return a minimum cut.
      ///
-     /// This function sets \c cutMap to the characteristic vector of a
-     /// minimum value cut: it will give a non-empty set \f$ X\subsetneq V \f$
-     /// with minimal outgoing capacity (i.e. \c cutMap will be \c true exactly
+     /// This function gives the best cut found by the
+     /// previously called \ref run(), \ref calculateOut() or \ref
+     /// calculateIn().
+     ///
+     /// It sets \c cutMap to the characteristic vector of the found
+     /// minimum value cut - a non-empty set \f$ X\subsetneq V \f$
+     /// of minimum outgoing capacity (i.e. \c cutMap will be \c true exactly
      /// for the nodes of \f$ X \f$).
      ///
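To make the run()/minCutValue()/minCutMap() workflow described in these comments concrete, a small sketch on a made-up three-node instance:

    #include <lemon/smart_graph.h>
    #include <lemon/hao_orlin.h>

    using namespace lemon;

    int main() {
      SmartDigraph g;
      SmartDigraph::Node a = g.addNode(), b = g.addNode(), c = g.addNode();
      SmartDigraph::ArcMap<int> cap(g);
      cap[g.addArc(a, b)] = 3;
      cap[g.addArc(b, c)] = 1;
      cap[g.addArc(c, a)] = 3;

      HaoOrlin<SmartDigraph, SmartDigraph::ArcMap<int> > ho(g, cap);
      ho.run();                       // init() + calculateOut() + calculateIn()
      int value = ho.minCutValue();   // value of the best cut found
      SmartDigraph::NodeMap<bool> cut(g);
      ho.minCutMap(cut);              // characteristic vector of that cut
      (void)value;
      return 0;
    }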
lemon/kruskal.h
r584 → r921

      ///\file
      ///\brief Kruskal's algorithm to compute a minimum cost spanning tree
-     ///
-     ///Kruskal's algorithm to compute a minimum cost spanning tree.
-     ///

      namespace lemon {
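For reference, a sketch of calling the kruskal() function declared in this header with a cost map and a writable bool edge map; the triangle graph and costs are made up:

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/kruskal.h>

    using namespace lemon;

    int main() {
      ListGraph g;
      ListGraph::Node u = g.addNode(), v = g.addNode(), w = g.addNode();
      ListGraph::EdgeMap<int> cost(g);
      cost[g.addEdge(u, v)] = 1;
      cost[g.addEdge(v, w)] = 2;
      cost[g.addEdge(u, w)] = 5;

      ListGraph::EdgeMap<bool> tree(g);
      int total = kruskal(g, cost, tree);   // marks the spanning tree edges in 'tree'
      std::cout << "tree cost: " << total << std::endl;   // prints 3
      return 0;
    }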
lemon/network_simplex.h
r889 → r922

Documentation changes:

-     /// In general, %NetworkSimplex is the fastest implementation available
-     /// in LEMON for this problem.
-     /// Moreover, it supports both directions of the supply/demand inequality
-     /// constraints. For more information, see \ref SupplyType.
+     /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
+     /// implementations available in LEMON for this problem.
+     /// Furthermore, this class supports both directions of the supply/demand
+     /// inequality constraints. For more information, see \ref SupplyType.
…
-     /// \warning Both number types must be signed and all input data must
+     /// \warning Both \c V and \c C must be signed number types.
+     /// \warning All input data (capacities, supply values, and costs) must
      /// be integer.
…
      /// By default, \ref BLOCK_SEARCH "Block Search" is used, which
-     /// proved to be the most efficient and the most robust on various
+     /// turned out to be the most efficient and the most robust on various
      /// test inputs.
…
      /// \param graph The digraph the algorithm runs on.
-     /// \param arc_mixing Indicate if the arcs have to be stored in a
+     /// \param arc_mixing Indicate if the arcs will be stored in a
      /// mixed order in the internal data structure.
-     /// In special cases, it could lead to better overall performance,
-     /// but it is usually slower. Therefore it is disabled by default.
-     NetworkSimplex(const GR& graph, bool arc_mixing = false) :
+     /// In general, it leads to similar performance as using the original
+     /// arc order, but it makes the algorithm more robust and in special
+     /// cases, even significantly faster. Therefore, it is enabled by default.
+     NetworkSimplex(const GR& graph, bool arc_mixing = true) :

A "\sa supplyType()" cross-reference is added to supplyMap(), and the
stSupply() documentation drops "such" ("with a map in which \c k is assigned
to \c s, ...").

Implementation changes (condensed):

- A single CharVector typedef (std::vector<signed char>) now backs both the
  arc states and the new ArcDirection values (DIR_UP = 1, DIR_DOWN = -1);
  the _forward bool vector is replaced by a _pred_dir vector, the StateVector
  and BoolVector typedefs are dropped, and several pivot-iteration
  temporaries (first, second, right, last, stem, par_stem, new_stem) become
  local variables.
- The pivot rule classes use the CharVector state type, and the candidate
  list pivot rule stores a candidate's reduced cost only when it is negative.
- In init(), the arc-mixing step is now max(_arc_num / _node_num, 3) instead
  of max(sqrt(_arc_num), 10), and the artificial arcs set _pred_dir instead
  of _forward.
- findLeavingArc(), changeFlow() and updatePotential() are rewritten in terms
  of _pred_dir, and updateTreeStructure() is restructured with a separate,
  simpler branch for the u_in == u_out case and clearer bookkeeping of the
  thread and last-successor indices.
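A sketch showing the constructor's arc_mixing flag (now enabled by default) and explicit pivot-rule selection; the instance is made up, and BLOCK_SEARCH is the default rule named in the diff:

    #include <lemon/smart_graph.h>
    #include <lemon/network_simplex.h>

    using namespace lemon;

    int main() {
      SmartDigraph g;
      SmartDigraph::Node s = g.addNode(), t = g.addNode();
      SmartDigraph::Arc a = g.addArc(s, t);
      SmartDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 4; cost[a] = 1;

      // Arc mixing is now enabled by default; pass false to keep the
      // original arc order in the internal data structure.
      NetworkSimplex<SmartDigraph> ns(g, false);
      ns.upperMap(cap).costMap(cost).stSupply(s, t, 2);

      // BLOCK_SEARCH is the default pivot rule; it can also be given explicitly.
      ns.run(NetworkSimplex<SmartDigraph>::BLOCK_SEARCH);
      return 0;
    }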
lemon/path.h
r877 → r920

      ///
      /// In a sense, the path can be treated as a list of arcs. The
-     /// lemon path type stores just this list. As a consequence, it
+     /// LEMON path type stores just this list. As a consequence, it
      /// cannot enumerate the nodes of the path and the source node of
      /// a zero length path is undefined.
…
-     /// \brief The nth arc.
+     /// \brief The n-th arc.
      ///
      /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
…
-     /// \brief Initialize arc iterator to point to the nth arc
+     /// \brief Initialize arc iterator to point to the n-th arc
      ///
      /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
…
The same two wording fixes ("lemon path type" -> "LEMON path type" and
"nth" -> "n-th") are applied throughout the documentation of the other path
classes in this file (SimplePath, ListPath and StaticPath), including the
comment "This function looks for the n-th arc in O(n) time."
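A brief sketch of the n-th arc access described in these comments, using the general Path class on a made-up two-arc path:

    #include <lemon/list_graph.h>
    #include <lemon/path.h>

    using namespace lemon;

    int main() {
      ListDigraph g;
      ListDigraph::Node a = g.addNode(), b = g.addNode(), c = g.addNode();
      ListDigraph::Arc ab = g.addArc(a, b), bc = g.addArc(b, c);

      Path<ListDigraph> p;
      p.addBack(ab);
      p.addBack(bc);

      ListDigraph::Arc second = p.nth(1);   // 0-based: the second arc (b -> c)
      for (Path<ListDigraph>::ArcIt it(p); it != INVALID; ++it) {
        // iterate over the arcs of the path
      }
      (void)second;
      return 0;
    }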
test/CMakeLists.txt
r964 → r966

      maps_test
      matching_test
+     max_cardinality_search_test
+     max_clique_test
      min_cost_arborescence_test
      min_cost_flow_test
      min_mean_cycle_test
+     nagamochi_ibaraki_test
      path_test
      planarity_test
test/Makefile.am
r964 → r966

      test/maps_test \
      test/matching_test \
+     test/max_cardinality_search_test \
+     test/max_clique_test \
      test/min_cost_arborescence_test \
      test/min_cost_flow_test \
      test/min_mean_cycle_test \
+     test/nagamochi_ibaraki_test \
      test/path_test \
      test/planarity_test \
…
      test_graph_test_SOURCES = test/graph_test.cc
      test_graph_utils_test_SOURCES = test/graph_utils_test.cc
+     test_hao_orlin_test_SOURCES = test/hao_orlin_test.cc
      test_heap_test_SOURCES = test/heap_test.cc
      test_kruskal_test_SOURCES = test/kruskal_test.cc
-     test_hao_orlin_test_SOURCES = test/hao_orlin_test.cc
      test_lgf_test_SOURCES = test/lgf_test.cc
      test_lp_test_SOURCES = test/lp_test.cc
…
      test_mip_test_SOURCES = test/mip_test.cc
      test_matching_test_SOURCES = test/matching_test.cc
+     test_max_cardinality_search_test_SOURCES = test/max_cardinality_search_test.cc
+     test_max_clique_test_SOURCES = test/max_clique_test.cc
      test_min_cost_arborescence_test_SOURCES = test/min_cost_arborescence_test.cc
      test_min_cost_flow_test_SOURCES = test/min_cost_flow_test.cc
      test_min_mean_cycle_test_SOURCES = test/min_mean_cycle_test.cc
+     test_nagamochi_ibaraki_test_SOURCES = test/nagamochi_ibaraki_test.cc
      test_path_test_SOURCES = test/path_test.cc
      test_planarity_test_SOURCES = test/planarity_test.cc
test/graph_copy_test.cc
r893 → r894

      #include <lemon/smart_graph.h>
      #include <lemon/list_graph.h>
+     #include <lemon/static_graph.h>
      #include <lemon/lgf_reader.h>
      #include <lemon/error.h>
…
      using namespace lemon;

+     template <typename GR>
      void digraph_copy_test() {
…

The digraph_copy_test() and graph_copy_test() functions become templates on
the target graph type GR: the ListDigraph/ListGraph target graphs and their
node, arc and edge maps are replaced by typename GR::Node, typename
GR::template NodeMap<int>, and so on, and the result-checking loops iterate
with typename GR::NodeIt / ArcIt / EdgeIt. main() then instantiates the tests
for several graph types:

-     digraph_copy_test();
-     graph_copy_test();
+     digraph_copy_test<SmartDigraph>();
+     digraph_copy_test<ListDigraph>();
+     digraph_copy_test<StaticDigraph>();
+     graph_copy_test<SmartGraph>();
+     graph_copy_test<ListGraph>();

      return 0;