Index: gtags
===================================================================
--- .hgtags (revision 1006)
+++ (revision )
@@ -1,2 +1,0 @@
-3ed8f7c8bed8f51aecdc8395aa11af2f1fa12d9f r1.2
-ffc2d2559fc916e90c3f000582a2d4761d229d44 r1.2.1
Index: CMakeLists.txt
===================================================================
--- CMakeLists.txt (revision 1055)
+++ CMakeLists.txt (revision 1056)
@@ -115,6 +115,4 @@
SET(LEMON_HAVE_LONG_LONG ${HAVE_LONG_LONG})
-INCLUDE(FindPythonInterp)
-
ENABLE_TESTING()
@@ -127,4 +125,5 @@
ADD_SUBDIRECTORY(lemon)
IF(${CMAKE_SOURCE_DIR} STREQUAL ${PROJECT_SOURCE_DIR})
+ ADD_SUBDIRECTORY(contrib)
ADD_SUBDIRECTORY(demo)
ADD_SUBDIRECTORY(tools)
Index: NEWS
===================================================================
--- NEWS (revision 1005)
+++ NEWS (revision 962)
@@ -1,14 +1,2 @@
-2010-10-21 Version 1.2.1 released
-
- Bugfix release.
-
- #366: Fix Pred[Matrix]MapPath::empty()
- #371: Bug fix in (di)graphCopy()
- The target graph is cleared before adding nodes and arcs/edges.
-
- #364: Add missing UndirectedTags
- #368: Fix the usage of std::numeric_limits<>::min() in Network Simplex
- #372: Fix a critical bug in preflow
-
2010-03-19 Version 1.2 released
Index: contrib/CMakeLists.txt
===================================================================
--- contrib/CMakeLists.txt (revision 1031)
+++ contrib/CMakeLists.txt (revision 1031)
@@ -0,0 +1,19 @@
+INCLUDE_DIRECTORIES(
+ ${PROJECT_SOURCE_DIR}
+ ${PROJECT_BINARY_DIR}
+)
+
+LINK_DIRECTORIES(
+ ${PROJECT_BINARY_DIR}/lemon
+)
+
+# Uncomment (and adjust) the following two lines. 'myprog' is the name
+# of the final executable ('.exe' will automatically be added to the
+# name on Windows) and 'myprog-main.cc' is the source code it is
+# compiled from. You can add more source files separated by
+# whitespaces. Moreover, you can add multiple similar blocks if you
+# want to build more than one executable.
+
+# ADD_EXECUTABLE(myprog myprog-main.cc)
+# TARGET_LINK_LIBRARIES(myprog lemon)
+
Index: doc/Doxyfile.in
===================================================================
--- doc/Doxyfile.in (revision 1039)
+++ doc/Doxyfile.in (revision 1040)
@@ -96,4 +96,5 @@
"@abs_top_srcdir@/lemon/concepts" \
"@abs_top_srcdir@/demo" \
+ "@abs_top_srcdir@/contrib" \
"@abs_top_srcdir@/tools" \
"@abs_top_srcdir@/test/test_tools.h" \
Index: doc/coding_style.dox
===================================================================
--- doc/coding_style.dox (revision 463)
+++ doc/coding_style.dox (revision 1023)
@@ -99,8 +99,8 @@
\subsection pri-loc-var Private member variables
-Private member variables should start with underscore
+Private member variables should start with underscore.
\code
-_start_with_underscores
+_start_with_underscore
\endcode
Index: doc/dirs.dox
===================================================================
--- doc/dirs.dox (revision 463)
+++ doc/dirs.dox (revision 1031)
@@ -32,4 +32,17 @@
documentation.
*/
+
+/**
+\dir contrib
+\brief Directory for user contributed source codes.
+
+You can place your own C++ code using LEMON into this directory, which
+will compile to an executable along with LEMON when you build the
+library. This is probably the easiest way of compiling short to medium
+codes, as this requires neither a system-wide LEMON installation nor
+adding several paths to the compiler.
+
+Please have a look at contrib/CMakeLists.txt for
+instructions on how to add your own files to the build process. */
/**
Index: doc/groups.dox
===================================================================
--- doc/groups.dox (revision 963)
+++ doc/groups.dox (revision 1023)
@@ -287,4 +287,12 @@
/**
+@defgroup matrices Matrices
+@ingroup auxdat
+\brief Two dimensional data storages implemented in LEMON.
+
+This group contains two dimensional data storages implemented in LEMON.
+*/
+
+/**
@defgroup algs Algorithms
\brief This group contains the several algorithms
@@ -319,4 +327,8 @@
but the digraph should not contain directed cycles with negative total
length.
+ - \ref FloydWarshall "Floyd-Warshall" and \ref Johnson "Johnson" algorithms
+ for solving the \e all-pairs \e shortest \e paths \e problem when arc
+ lenghts can be either positive or negative, but the digraph should
+ not contain directed cycles with negative total length.
- \ref Suurballe A successive shortest path algorithm for finding
arc-disjoint paths between two nodes having minimum total length.
@@ -352,8 +364,18 @@
\f[ 0 \leq f(uv) \leq cap(uv) \quad \forall uv\in A \f]
-\ref Preflow is an efficient implementation of Goldberg-Tarjan's
-preflow push-relabel algorithm \ref goldberg88newapproach for finding
-maximum flows. It also provides functions to query the minimum cut,
-which is the dual problem of maximum flow.
+LEMON contains several algorithms for solving maximum flow problems:
+- \ref EdmondsKarp Edmonds-Karp algorithm
+ \ref edmondskarp72theoretical.
+- \ref Preflow Goldberg-Tarjan's preflow push-relabel algorithm
+ \ref goldberg88newapproach.
+- \ref DinitzSleatorTarjan Dinitz's blocking flow algorithm with dynamic trees
+ \ref dinic70algorithm, \ref sleator83dynamic.
+- \ref GoldbergTarjan !Preflow push-relabel algorithm with dynamic trees
+ \ref goldberg88newapproach, \ref sleator83dynamic.
+
+In most cases the \ref Preflow algorithm provides the
+fastest method for computing a maximum flow. All implementations
+also provide functions to query the minimum cut, which is the dual
+problem of maximum flow.
\ref Circulation is a preflow push-relabel algorithm implemented directly
@@ -385,8 +407,8 @@
strongly polynomial \ref klein67primal, \ref goldberg89cyclecanceling.
-In general NetworkSimplex is the most efficient implementation,
-but in special cases other algorithms could be faster.
+In general, \ref NetworkSimplex and \ref CostScaling are the most efficient
+implementations, but the other two algorithms could be faster in special cases.
For example, if the total supply and/or capacities are rather small,
-CapacityScaling is usually the fastest algorithm (without effective scaling).
+\ref CapacityScaling is usually the fastest algorithm (without effective scaling).
*/
@@ -412,4 +434,6 @@
- \ref HaoOrlin "Hao-Orlin algorithm" for calculating minimum cut
in directed graphs.
+- \ref NagamochiIbaraki "Nagamochi-Ibaraki algorithm" for
+ calculating minimum cut in undirected graphs.
- \ref GomoryHu "Gomory-Hu tree computation" for calculating
all-pairs minimum cut in undirected graphs.
@@ -448,5 +472,5 @@
\ref dasdan98minmeancycle.
-In practice, the \ref HowardMmc "Howard" algorithm proved to be by far the
+In practice, the \ref HowardMmc "Howard" algorithm turned out to be by far the
most efficient one, though the best known theoretical bound on its running
time is exponential.
@@ -474,4 +498,14 @@
The matching algorithms implemented in LEMON:
+- \ref MaxBipartiteMatching Hopcroft-Karp augmenting path algorithm
+ for calculating maximum cardinality matching in bipartite graphs.
+- \ref PrBipartiteMatching Push-relabel algorithm
+ for calculating maximum cardinality matching in bipartite graphs.
+- \ref MaxWeightedBipartiteMatching
+ Successive shortest path algorithm for calculating maximum weighted
+ matching and maximum weighted bipartite matching in bipartite graphs.
+- \ref MinCostMaxBipartiteMatching
+ Successive shortest path algorithm for calculating minimum cost maximum
+ matching in bipartite graphs.
- \ref MaxMatching Edmond's blossom shrinking algorithm for calculating
maximum cardinality matching in general graphs.
@@ -506,5 +540,5 @@
/**
-@defgroup planar Planarity Embedding and Drawing
+@defgroup planar Planar Embedding and Drawing
@ingroup algs
\brief Algorithms for planarity checking, embedding and drawing
@@ -515,4 +549,17 @@
\image html planar.png
\image latex planar.eps "Plane graph" width=\textwidth
+*/
+
+/**
+@defgroup approx_algs Approximation Algorithms
+@ingroup algs
+\brief Approximation algorithms.
+
+This group contains the approximation and heuristic algorithms
+implemented in LEMON.
+
+Maximum Clique Problem
+ - \ref GrossoLocatelliPullanMc An efficient heuristic algorithm of
+ Grosso, Locatelli, and Pullan.
*/
@@ -546,4 +593,21 @@
The currently supported solvers are \ref glpk, \ref clp, \ref cbc,
\ref cplex, \ref soplex.
+*/
+
+/**
+@defgroup lp_utils Tools for Lp and Mip Solvers
+@ingroup lp_group
+\brief Helper tools to the Lp and Mip solvers.
+
+This group adds some helper tools to general optimization framework
+implemented in LEMON.
+*/
+
+/**
+@defgroup metah Metaheuristics
+@ingroup gen_opt_group
+\brief Metaheuristics for LEMON library.
+
+This group contains some metaheuristic optimization tools.
*/
Index: doc/references.bib
===================================================================
--- doc/references.bib (revision 802)
+++ doc/references.bib (revision 999)
@@ -298,4 +298,17 @@
address = {Dublin, Ireland},
year = 1991,
- month = sep,
-}
+ month = sep
+}
+
+%%%%% Other algorithms %%%%%
+
+@article{grosso08maxclique,
+ author = {Andrea Grosso and Marco Locatelli and Wayne Pullan},
+ title = {Simple ingredients leading to very efficient
+ heuristics for the maximum clique problem},
+ journal = {Journal of Heuristics},
+ year = 2008,
+ volume = 14,
+ number = 6,
+ pages = {587--612}
+}
Index: lemon/Makefile.am
===================================================================
--- lemon/Makefile.am (revision 953)
+++ lemon/Makefile.am (revision 1021)
@@ -91,4 +91,5 @@
lemon/graph_to_eps.h \
lemon/grid_graph.h \
+ lemon/grosso_locatelli_pullan_mc.h \
lemon/hartmann_orlin_mmc.h \
lemon/howard_mmc.h \
@@ -107,4 +108,6 @@
lemon/math.h \
lemon/min_cost_arborescence.h \
+ lemon/max_cardinality_search.h \
+ lemon/nagamochi_ibaraki.h \
lemon/nauty_reader.h \
lemon/network_simplex.h \
Index: lemon/capacity_scaling.h
===================================================================
--- lemon/capacity_scaling.h (revision 956)
+++ lemon/capacity_scaling.h (revision 1026)
@@ -87,8 +87,9 @@
/// consider to use the named template parameters instead.
///
- /// \warning Both number types must be signed and all input data must
+ /// \warning Both \c V and \c C must be signed number types.
+ /// \warning All input data (capacities, supply values, and costs) must
/// be integer.
- /// \warning This algorithm does not support negative costs for such
- /// arcs that have infinite upper bound.
+ /// \warning This algorithm does not support negative costs for
+ /// arcs having infinite upper bound.
#ifdef DOXYGEN
template
@@ -423,5 +424,5 @@
///
/// Using this function has the same effect as using \ref supplyMap()
- /// with such a map in which \c k is assigned to \c s, \c -k is
+ /// with a map in which \c k is assigned to \c s, \c -k is
/// assigned to \c t and all other nodes have zero supply value.
///
Index: lemon/core.h
===================================================================
--- lemon/core.h (revision 986)
+++ lemon/core.h (revision 1023)
@@ -447,4 +447,23 @@
}
+
+ /// \brief Check whether a graph is undirected.
+ ///
+ /// This function returns \c true if the given graph is undirected.
+#ifdef DOXYGEN
+ template
+ bool undirected(const GR& g) { return false; }
+#else
+ template
+ typename enable_if, bool>::type
+ undirected(const GR&) {
+ return true;
+ }
+ template
+ typename disable_if, bool>::type
+ undirected(const GR&) {
+ return false;
+ }
+#endif
/// \brief Class to copy a digraph.
Index: lemon/cost_scaling.h
===================================================================
--- lemon/cost_scaling.h (revision 1041)
+++ lemon/cost_scaling.h (revision 1049)
@@ -98,4 +98,7 @@
/// "preflow push-relabel" algorithm for the maximum flow problem.
///
+ /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
+ /// implementations available in LEMON for this problem.
+ ///
/// Most of the parameters of the problem (except for the digraph)
/// can be given using separate functions, and the algorithm can be
@@ -114,8 +117,9 @@
/// consider to use the named template parameters instead.
///
- /// \warning Both number types must be signed and all input data must
+ /// \warning Both \c V and \c C must be signed number types.
+ /// \warning All input data (capacities, supply values, and costs) must
/// be integer.
- /// \warning This algorithm does not support negative costs for such
- /// arcs that have infinite upper bound.
+ /// \warning This algorithm does not support negative costs for
+ /// arcs having infinite upper bound.
///
/// \note %CostScaling provides three different internal methods,
@@ -179,5 +183,5 @@
/// relabel operation.
/// By default, the so called \ref PARTIAL_AUGMENT
- /// "Partial Augment-Relabel" method is used, which proved to be
+ /// "Partial Augment-Relabel" method is used, which turned out to be
/// the most efficient and the most robust on various test inputs.
/// However, the other methods can be selected using the \ref run()
@@ -234,5 +238,4 @@
};
- typedef StaticVectorMap LargeCostNodeMap;
typedef StaticVectorMap LargeCostArcMap;
@@ -285,12 +288,4 @@
int _max_rank;
- // Data for a StaticDigraph structure
- typedef std::pair IntPair;
- StaticDigraph _sgr;
- std::vector _arc_vec;
- std::vector _cost_vec;
- LargeCostArcMap _cost_map;
- LargeCostNodeMap _pi_map;
-
public:
@@ -339,5 +334,4 @@
CostScaling(const GR& graph) :
_graph(graph), _node_id(graph), _arc_idf(graph), _arc_idb(graph),
- _cost_map(_cost_vec), _pi_map(_pi),
INF(std::numeric_limits::has_infinity ?
std::numeric_limits::infinity() :
@@ -448,5 +442,5 @@
///
/// Using this function has the same effect as using \ref supplyMap()
- /// with such a map in which \c k is assigned to \c s, \c -k is
+ /// with a map in which \c k is assigned to \c s, \c -k is
/// assigned to \c t and all other nodes have zero supply value.
///
@@ -494,5 +488,5 @@
/// \param method The internal method that will be used in the
/// algorithm. For more information, see \ref Method.
- /// \param factor The cost scaling factor. It must be larger than one.
+ /// \param factor The cost scaling factor. It must be at least two.
///
/// \return \c INFEASIBLE if no feasible flow exists,
@@ -508,5 +502,6 @@
/// \see ProblemType, Method
/// \see resetParams(), reset()
- ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 8) {
+ ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 16) {
+ LEMON_ASSERT(factor >= 2, "The scaling factor must be at least 2");
_alpha = factor;
ProblemType pt = init();
@@ -572,16 +567,23 @@
}
- /// \brief Reset all the parameters that have been given before.
- ///
- /// This function resets all the paramaters that have been given
- /// before using functions \ref lowerMap(), \ref upperMap(),
- /// \ref costMap(), \ref supplyMap(), \ref stSupply().
- ///
- /// It is useful for multiple run() calls. If this function is not
- /// used, all the parameters given before are kept for the next
- /// \ref run() call.
- /// However, the underlying digraph must not be modified after this
- /// class have been constructed, since it copies and extends the graph.
+ /// \brief Reset the internal data structures and all the parameters
+ /// that have been given before.
+ ///
+ /// This function resets the internal data structures and all the
+  /// parameters that have been given before using functions \ref lowerMap(),
+ /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply().
+ ///
+ /// It is useful for multiple \ref run() calls. By default, all the given
+ /// parameters are kept for the next \ref run() call, unless
+ /// \ref resetParams() or \ref reset() is used.
+ /// If the underlying digraph was also modified after the construction
+ /// of the class or the last \ref reset() call, then the \ref reset()
+ /// function must be used, otherwise \ref resetParams() is sufficient.
+ ///
+ /// See \ref resetParams() for examples.
+ ///
/// \return (*this)
+ ///
+ /// \see resetParams(), run()
CostScaling& reset() {
// Resize vectors
@@ -608,7 +610,4 @@
_excess.resize(_res_node_num);
_next_out.resize(_res_node_num);
-
- _arc_vec.reserve(_res_arc_num);
- _cost_vec.reserve(_res_arc_num);
// Copy the graph
@@ -887,12 +886,4 @@
}
- return OPTIMAL;
- }
-
- // Execute the algorithm and transform the results
- void start(Method method) {
- // Maximum path length for partial augment
- const int MAX_PATH_LENGTH = 4;
-
// Initialize data structures for buckets
_max_rank = _alpha * _res_node_num;
@@ -902,5 +893,11 @@
_rank.resize(_res_node_num + 1);
- // Execute the algorithm
+ return OPTIMAL;
+ }
+
+ // Execute the algorithm and transform the results
+ void start(Method method) {
+ const int MAX_PARTIAL_PATH_LENGTH = 4;
+
switch (method) {
case PUSH:
@@ -911,24 +908,65 @@
break;
case PARTIAL_AUGMENT:
- startAugment(MAX_PATH_LENGTH);
+ startAugment(MAX_PARTIAL_PATH_LENGTH);
break;
}
- // Compute node potentials for the original costs
- _arc_vec.clear();
- _cost_vec.clear();
- for (int j = 0; j != _res_arc_num; ++j) {
- if (_res_cap[j] > 0) {
- _arc_vec.push_back(IntPair(_source[j], _target[j]));
- _cost_vec.push_back(_scost[j]);
- }
- }
- _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
- typename BellmanFord
- ::template SetDistMap::Create bf(_sgr, _cost_map);
- bf.distMap(_pi_map);
- bf.init(0);
- bf.start();
+ // Compute node potentials (dual solution)
+ for (int i = 0; i != _res_node_num; ++i) {
+ _pi[i] = static_cast(_pi[i] / (_res_node_num * _alpha));
+ }
+ bool optimal = true;
+ for (int i = 0; optimal && i != _res_node_num; ++i) {
+ LargeCost pi_i = _pi[i];
+ int last_out = _first_out[i+1];
+ for (int j = _first_out[i]; j != last_out; ++j) {
+ if (_res_cap[j] > 0 && _scost[j] + pi_i - _pi[_target[j]] < 0) {
+ optimal = false;
+ break;
+ }
+ }
+ }
+
+ if (!optimal) {
+ // Compute node potentials for the original costs with BellmanFord
+ // (if it is necessary)
+ typedef std::pair IntPair;
+ StaticDigraph sgr;
+ std::vector arc_vec;
+ std::vector cost_vec;
+ LargeCostArcMap cost_map(cost_vec);
+
+ arc_vec.clear();
+ cost_vec.clear();
+ for (int j = 0; j != _res_arc_num; ++j) {
+ if (_res_cap[j] > 0) {
+ int u = _source[j], v = _target[j];
+ arc_vec.push_back(IntPair(u, v));
+ cost_vec.push_back(_scost[j] + _pi[u] - _pi[v]);
+ }
+ }
+ sgr.build(_res_node_num, arc_vec.begin(), arc_vec.end());
+
+ typename BellmanFord::Create
+ bf(sgr, cost_map);
+ bf.init(0);
+ bf.start();
+
+ for (int i = 0; i != _res_node_num; ++i) {
+ _pi[i] += bf.dist(sgr.node(i));
+ }
+ }
+
+ // Shift potentials to meet the requirements of the GEQ type
+ // optimality conditions
+ LargeCost max_pot = _pi[_root];
+ for (int i = 0; i != _res_node_num; ++i) {
+ if (_pi[i] > max_pot) max_pot = _pi[i];
+ }
+ if (max_pot != 0) {
+ for (int i = 0; i != _res_node_num; ++i) {
+ _pi[i] -= max_pot;
+ }
+ }
// Handle non-zero lower bounds
@@ -948,11 +986,13 @@
LargeCost pi_u = _pi[u];
for (int a = _first_out[u]; a != last_out; ++a) {
- int v = _target[a];
- if (_res_cap[a] > 0 && _cost[a] + pi_u - _pi[v] < 0) {
- Value delta = _res_cap[a];
- _excess[u] -= delta;
- _excess[v] += delta;
- _res_cap[a] = 0;
- _res_cap[_reverse[a]] += delta;
+ Value delta = _res_cap[a];
+ if (delta > 0) {
+ int v = _target[a];
+ if (_cost[a] + pi_u - _pi[v] < 0) {
+ _excess[u] -= delta;
+ _excess[v] += delta;
+ _res_cap[a] = 0;
+ _res_cap[_reverse[a]] += delta;
+ }
}
}
@@ -970,33 +1010,232 @@
}
- // Early termination heuristic
- bool earlyTermination() {
- const double EARLY_TERM_FACTOR = 3.0;
-
- // Build a static residual graph
- _arc_vec.clear();
- _cost_vec.clear();
- for (int j = 0; j != _res_arc_num; ++j) {
- if (_res_cap[j] > 0) {
- _arc_vec.push_back(IntPair(_source[j], _target[j]));
- _cost_vec.push_back(_cost[j] + 1);
- }
- }
- _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
- // Run Bellman-Ford algorithm to check if the current flow is optimal
- BellmanFord bf(_sgr, _cost_map);
- bf.init(0);
- bool done = false;
- int K = int(EARLY_TERM_FACTOR * std::sqrt(double(_res_node_num)));
- for (int i = 0; i < K && !done; ++i) {
- done = bf.processNextWeakRound();
- }
- return done;
+ // Price (potential) refinement heuristic
+ bool priceRefinement() {
+
+      // Stack for storing the topological order
+ IntVector stack(_res_node_num);
+ int stack_top;
+
+ // Perform phases
+ while (topologicalSort(stack, stack_top)) {
+
+ // Compute node ranks in the acyclic admissible network and
+ // store the nodes in buckets
+ for (int i = 0; i != _res_node_num; ++i) {
+ _rank[i] = 0;
+ }
+ const int bucket_end = _root + 1;
+ for (int r = 0; r != _max_rank; ++r) {
+ _buckets[r] = bucket_end;
+ }
+ int top_rank = 0;
+ for ( ; stack_top >= 0; --stack_top) {
+ int u = stack[stack_top], v;
+ int rank_u = _rank[u];
+
+ LargeCost rc, pi_u = _pi[u];
+ int last_out = _first_out[u+1];
+ for (int a = _first_out[u]; a != last_out; ++a) {
+ if (_res_cap[a] > 0) {
+ v = _target[a];
+ rc = _cost[a] + pi_u - _pi[v];
+ if (rc < 0) {
+ LargeCost nrc = static_cast((-rc - 0.5) / _epsilon);
+ if (nrc < LargeCost(_max_rank)) {
+ int new_rank_v = rank_u + static_cast(nrc);
+ if (new_rank_v > _rank[v]) {
+ _rank[v] = new_rank_v;
+ }
+ }
+ }
+ }
+ }
+
+ if (rank_u > 0) {
+ top_rank = std::max(top_rank, rank_u);
+ int bfirst = _buckets[rank_u];
+ _bucket_next[u] = bfirst;
+ _bucket_prev[bfirst] = u;
+ _buckets[rank_u] = u;
+ }
+ }
+
+ // Check if the current flow is epsilon-optimal
+ if (top_rank == 0) {
+ return true;
+ }
+
+ // Process buckets in top-down order
+ for (int rank = top_rank; rank > 0; --rank) {
+ while (_buckets[rank] != bucket_end) {
+ // Remove the first node from the current bucket
+ int u = _buckets[rank];
+ _buckets[rank] = _bucket_next[u];
+
+ // Search the outgoing arcs of u
+ LargeCost rc, pi_u = _pi[u];
+ int last_out = _first_out[u+1];
+ int v, old_rank_v, new_rank_v;
+ for (int a = _first_out[u]; a != last_out; ++a) {
+ if (_res_cap[a] > 0) {
+ v = _target[a];
+ old_rank_v = _rank[v];
+
+ if (old_rank_v < rank) {
+
+ // Compute the new rank of node v
+ rc = _cost[a] + pi_u - _pi[v];
+ if (rc < 0) {
+ new_rank_v = rank;
+ } else {
+ LargeCost nrc = rc / _epsilon;
+ new_rank_v = 0;
+ if (nrc < LargeCost(_max_rank)) {
+ new_rank_v = rank - 1 - static_cast(nrc);
+ }
+ }
+
+ // Change the rank of node v
+ if (new_rank_v > old_rank_v) {
+ _rank[v] = new_rank_v;
+
+ // Remove v from its old bucket
+ if (old_rank_v > 0) {
+ if (_buckets[old_rank_v] == v) {
+ _buckets[old_rank_v] = _bucket_next[v];
+ } else {
+ int pv = _bucket_prev[v], nv = _bucket_next[v];
+ _bucket_next[pv] = nv;
+ _bucket_prev[nv] = pv;
+ }
+ }
+
+ // Insert v into its new bucket
+ int nv = _buckets[new_rank_v];
+ _bucket_next[v] = nv;
+ _bucket_prev[nv] = v;
+ _buckets[new_rank_v] = v;
+ }
+ }
+ }
+ }
+
+ // Refine potential of node u
+ _pi[u] -= rank * _epsilon;
+ }
+ }
+
+ }
+
+ return false;
+ }
+
+ // Find and cancel cycles in the admissible network and
+ // determine topological order using DFS
+ bool topologicalSort(IntVector &stack, int &stack_top) {
+ const int MAX_CYCLE_CANCEL = 1;
+
+ BoolVector reached(_res_node_num, false);
+ BoolVector processed(_res_node_num, false);
+ IntVector pred(_res_node_num);
+ for (int i = 0; i != _res_node_num; ++i) {
+ _next_out[i] = _first_out[i];
+ }
+ stack_top = -1;
+
+ int cycle_cnt = 0;
+ for (int start = 0; start != _res_node_num; ++start) {
+ if (reached[start]) continue;
+
+ // Start DFS search from this start node
+ pred[start] = -1;
+ int tip = start, v;
+ while (true) {
+ // Check the outgoing arcs of the current tip node
+ reached[tip] = true;
+ LargeCost pi_tip = _pi[tip];
+ int a, last_out = _first_out[tip+1];
+ for (a = _next_out[tip]; a != last_out; ++a) {
+ if (_res_cap[a] > 0) {
+ v = _target[a];
+ if (_cost[a] + pi_tip - _pi[v] < 0) {
+ if (!reached[v]) {
+ // A new node is reached
+ reached[v] = true;
+ pred[v] = tip;
+ _next_out[tip] = a;
+ tip = v;
+ a = _next_out[tip];
+ last_out = _first_out[tip+1];
+ break;
+ }
+ else if (!processed[v]) {
+ // A cycle is found
+ ++cycle_cnt;
+ _next_out[tip] = a;
+
+ // Find the minimum residual capacity along the cycle
+ Value d, delta = _res_cap[a];
+ int u, delta_node = tip;
+ for (u = tip; u != v; ) {
+ u = pred[u];
+ d = _res_cap[_next_out[u]];
+ if (d <= delta) {
+ delta = d;
+ delta_node = u;
+ }
+ }
+
+ // Augment along the cycle
+ _res_cap[a] -= delta;
+ _res_cap[_reverse[a]] += delta;
+ for (u = tip; u != v; ) {
+ u = pred[u];
+ int ca = _next_out[u];
+ _res_cap[ca] -= delta;
+ _res_cap[_reverse[ca]] += delta;
+ }
+
+ // Check the maximum number of cycle canceling
+ if (cycle_cnt >= MAX_CYCLE_CANCEL) {
+ return false;
+ }
+
+ // Roll back search to delta_node
+ if (delta_node != tip) {
+ for (u = tip; u != delta_node; u = pred[u]) {
+ reached[u] = false;
+ }
+ tip = delta_node;
+ a = _next_out[tip] + 1;
+ last_out = _first_out[tip+1];
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Step back to the previous node
+ if (a == last_out) {
+ processed[tip] = true;
+ stack[++stack_top] = tip;
+ tip = pred[tip];
+ if (tip < 0) {
+ // Finish DFS from the current start node
+ break;
+ }
+ ++_next_out[tip];
+ }
+ }
+
+ }
+
+ return (cycle_cnt == 0);
}
// Global potential update heuristic
void globalUpdate() {
- int bucket_end = _root + 1;
+ const int bucket_end = _root + 1;
// Initialize buckets
@@ -1005,10 +1244,11 @@
}
Value total_excess = 0;
+ int b0 = bucket_end;
for (int i = 0; i != _res_node_num; ++i) {
if (_excess[i] < 0) {
_rank[i] = 0;
- _bucket_next[i] = _buckets[0];
- _bucket_prev[_buckets[0]] = i;
- _buckets[0] = i;
+ _bucket_next[i] = b0;
+ _bucket_prev[b0] = i;
+ b0 = i;
} else {
total_excess += _excess[i];
@@ -1017,4 +1257,5 @@
}
if (total_excess == 0) return;
+ _buckets[0] = b0;
// Search the buckets
@@ -1038,6 +1279,7 @@
LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
int new_rank_v = old_rank_v;
- if (nrc < LargeCost(_max_rank))
- new_rank_v = r + 1 + int(nrc);
+ if (nrc < LargeCost(_max_rank)) {
+ new_rank_v = r + 1 + static_cast(nrc);
+ }
// Change the rank of v
@@ -1051,12 +1293,14 @@
_buckets[old_rank_v] = _bucket_next[v];
} else {
- _bucket_next[_bucket_prev[v]] = _bucket_next[v];
- _bucket_prev[_bucket_next[v]] = _bucket_prev[v];
+ int pv = _bucket_prev[v], nv = _bucket_next[v];
+ _bucket_next[pv] = nv;
+ _bucket_prev[nv] = pv;
}
}
- // Insert v to its new bucket
- _bucket_next[v] = _buckets[new_rank_v];
- _bucket_prev[_buckets[new_rank_v]] = v;
+ // Insert v into its new bucket
+ int nv = _buckets[new_rank_v];
+ _bucket_next[v] = nv;
+ _bucket_prev[nv] = v;
_buckets[new_rank_v] = v;
}
@@ -1087,21 +1331,23 @@
void startAugment(int max_length) {
// Paramters for heuristics
- const int EARLY_TERM_EPSILON_LIMIT = 1000;
- const double GLOBAL_UPDATE_FACTOR = 3.0;
-
- const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+ const int PRICE_REFINEMENT_LIMIT = 2;
+ const double GLOBAL_UPDATE_FACTOR = 1.0;
+ const int global_update_skip = static_cast(GLOBAL_UPDATE_FACTOR *
(_res_node_num + _sup_node_num * _sup_node_num));
- int next_update_limit = global_update_freq;
-
+ int next_global_update_limit = global_update_skip;
+
+ // Perform cost scaling phases
+ IntVector path;
+ BoolVector path_arc(_res_arc_num, false);
int relabel_cnt = 0;
-
- // Perform cost scaling phases
- std::vector path;
+ int eps_phase_cnt = 0;
for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
1 : _epsilon / _alpha )
{
- // Early termination heuristic
- if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
- if (earlyTermination()) break;
+ ++eps_phase_cnt;
+
+ // Price refinement heuristic
+ if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
+ if (priceRefinement()) continue;
}
@@ -1120,30 +1366,43 @@
// Find an augmenting path from the start node
- path.clear();
int tip = start;
- while (_excess[tip] >= 0 && int(path.size()) < max_length) {
+ while (int(path.size()) < max_length && _excess[tip] >= 0) {
int u;
- LargeCost min_red_cost, rc, pi_tip = _pi[tip];
+ LargeCost rc, min_red_cost = std::numeric_limits::max();
+ LargeCost pi_tip = _pi[tip];
int last_out = _first_out[tip+1];
for (int a = _next_out[tip]; a != last_out; ++a) {
- u = _target[a];
- if (_res_cap[a] > 0 && _cost[a] + pi_tip - _pi[u] < 0) {
- path.push_back(a);
- _next_out[tip] = a;
- tip = u;
- goto next_step;
+ if (_res_cap[a] > 0) {
+ u = _target[a];
+ rc = _cost[a] + pi_tip - _pi[u];
+ if (rc < 0) {
+ path.push_back(a);
+ _next_out[tip] = a;
+ if (path_arc[a]) {
+ goto augment; // a cycle is found, stop path search
+ }
+ tip = u;
+ path_arc[a] = true;
+ goto next_step;
+ }
+ else if (rc < min_red_cost) {
+ min_red_cost = rc;
+ }
}
}
// Relabel tip node
- min_red_cost = std::numeric_limits::max();
if (tip != start) {
int ra = _reverse[path.back()];
- min_red_cost = _cost[ra] + pi_tip - _pi[_target[ra]];
+ min_red_cost =
+ std::min(min_red_cost, _cost[ra] + pi_tip - _pi[_target[ra]]);
}
+ last_out = _next_out[tip];
for (int a = _first_out[tip]; a != last_out; ++a) {
- rc = _cost[a] + pi_tip - _pi[_target[a]];
- if (_res_cap[a] > 0 && rc < min_red_cost) {
- min_red_cost = rc;
+ if (_res_cap[a] > 0) {
+ rc = _cost[a] + pi_tip - _pi[_target[a]];
+ if (rc < min_red_cost) {
+ min_red_cost = rc;
+ }
}
}
@@ -1154,5 +1413,7 @@
// Step back
if (tip != start) {
- tip = _source[path.back()];
+ int pa = path.back();
+ path_arc[pa] = false;
+ tip = _source[pa];
path.pop_back();
}
@@ -1162,4 +1423,5 @@
// Augment along the found path (as much flow as possible)
+ augment:
Value delta;
int pa, u, v = start;
@@ -1168,4 +1430,5 @@
u = v;
v = _target[pa];
+ path_arc[pa] = false;
delta = std::min(_res_cap[pa], _excess[u]);
_res_cap[pa] -= delta;
@@ -1173,15 +1436,19 @@
_excess[u] -= delta;
_excess[v] += delta;
- if (_excess[v] > 0 && _excess[v] <= delta)
+ if (_excess[v] > 0 && _excess[v] <= delta) {
_active_nodes.push_back(v);
- }
+ }
+ }
+ path.clear();
// Global update heuristic
- if (relabel_cnt >= next_update_limit) {
+ if (relabel_cnt >= next_global_update_limit) {
globalUpdate();
- next_update_limit += global_update_freq;
- }
- }
- }
+ next_global_update_limit += global_update_skip;
+ }
+ }
+
+ }
+
}
@@ -1189,22 +1456,24 @@
void startPush() {
// Paramters for heuristics
- const int EARLY_TERM_EPSILON_LIMIT = 1000;
+ const int PRICE_REFINEMENT_LIMIT = 2;
const double GLOBAL_UPDATE_FACTOR = 2.0;
- const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+ const int global_update_skip = static_cast(GLOBAL_UPDATE_FACTOR *
(_res_node_num + _sup_node_num * _sup_node_num));
- int next_update_limit = global_update_freq;
-
- int relabel_cnt = 0;
+ int next_global_update_limit = global_update_skip;
// Perform cost scaling phases
BoolVector hyper(_res_node_num, false);
LargeCostVector hyper_cost(_res_node_num);
+ int relabel_cnt = 0;
+ int eps_phase_cnt = 0;
for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
1 : _epsilon / _alpha )
{
- // Early termination heuristic
- if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
- if (earlyTermination()) break;
+ ++eps_phase_cnt;
+
+ // Price refinement heuristic
+ if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
+ if (priceRefinement()) continue;
}
@@ -1278,7 +1547,9 @@
std::numeric_limits::max();
for (int a = _first_out[n]; a != last_out; ++a) {
- rc = _cost[a] + pi_n - _pi[_target[a]];
- if (_res_cap[a] > 0 && rc < min_red_cost) {
- min_red_cost = rc;
+ if (_res_cap[a] > 0) {
+ rc = _cost[a] + pi_n - _pi[_target[a]];
+ if (rc < min_red_cost) {
+ min_red_cost = rc;
+ }
}
}
@@ -1298,9 +1569,9 @@
// Global update heuristic
- if (relabel_cnt >= next_update_limit) {
+ if (relabel_cnt >= next_global_update_limit) {
globalUpdate();
for (int u = 0; u != _res_node_num; ++u)
hyper[u] = false;
- next_update_limit += global_update_freq;
+ next_global_update_limit += global_update_skip;
}
}
Index: lemon/cycle_canceling.h
===================================================================
--- lemon/cycle_canceling.h (revision 956)
+++ lemon/cycle_canceling.h (revision 1026)
@@ -66,8 +66,9 @@
/// algorithm. By default, it is the same as \c V.
///
- /// \warning Both number types must be signed and all input data must
+ /// \warning Both \c V and \c C must be signed number types.
+ /// \warning All input data (capacities, supply values, and costs) must
/// be integer.
- /// \warning This algorithm does not support negative costs for such
- /// arcs that have infinite upper bound.
+ /// \warning This algorithm does not support negative costs for
+ /// arcs having infinite upper bound.
///
/// \note For more information about the three available methods,
@@ -117,6 +118,5 @@
/// \ref CycleCanceling provides three different cycle-canceling
/// methods. By default, \ref CANCEL_AND_TIGHTEN "Cancel and Tighten"
- /// is used, which proved to be the most efficient and the most robust
- /// on various test inputs.
+ /// is used, which is by far the most efficient and the most robust.
/// However, the other methods can be selected using the \ref run()
/// function with the proper parameter.
@@ -350,5 +350,5 @@
///
/// Using this function has the same effect as using \ref supplyMap()
- /// with such a map in which \c k is assigned to \c s, \c -k is
+ /// with a map in which \c k is assigned to \c s, \c -k is
/// assigned to \c t and all other nodes have zero supply value.
///
Index: lemon/euler.h
===================================================================
--- lemon/euler.h (revision 956)
+++ lemon/euler.h (revision 1023)
@@ -37,5 +37,5 @@
///Euler tour iterator for digraphs.
- /// \ingroup graph_prop
+ /// \ingroup graph_properties
///This iterator provides an Euler tour (Eulerian circuit) of a \e directed
///graph (if there exists) and it converts to the \c Arc type of the digraph.
Index: lemon/grosso_locatelli_pullan_mc.h
===================================================================
--- lemon/grosso_locatelli_pullan_mc.h (revision 1022)
+++ lemon/grosso_locatelli_pullan_mc.h (revision 1022)
@@ -0,0 +1,840 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_GROSSO_LOCATELLI_PULLAN_MC_H
+#define LEMON_GROSSO_LOCATELLI_PULLAN_MC_H
+
+/// \ingroup approx_algs
+///
+/// \file
+/// \brief The iterated local search algorithm of Grosso, Locatelli, and Pullan
+/// for the maximum clique problem
+
+#include <vector>
+#include <limits>
+#include <lemon/core.h>
+#include <lemon/random.h>
+
+namespace lemon {
+
+ /// \addtogroup approx_algs
+ /// @{
+
+ /// \brief Implementation of the iterated local search algorithm of Grosso,
+ /// Locatelli, and Pullan for the maximum clique problem
+ ///
+ /// \ref GrossoLocatelliPullanMc implements the iterated local search
+ /// algorithm of Grosso, Locatelli, and Pullan for solving the \e maximum
+ /// \e clique \e problem \ref grosso08maxclique.
+ /// It is to find the largest complete subgraph (\e clique) in an
+ /// undirected graph, i.e., the largest set of nodes where each
+ /// pair of nodes is connected.
+ ///
+ /// This class provides a simple but highly efficient and robust heuristic
+ /// method that quickly finds a quite large clique, but not necessarily the
+ /// largest one.
+ /// The algorithm performs a certain number of iterations to find several
+ /// cliques and selects the largest one among them. Various limits can be
+ /// specified to control the running time and the effectiveness of the
+ /// search process.
+ ///
+ /// \tparam GR The undirected graph type the algorithm runs on.
+ ///
+ /// \note %GrossoLocatelliPullanMc provides three different node selection
+ /// rules, from which the most powerful one is used by default.
+ /// For more information, see \ref SelectionRule.
+ template <typename GR>
+ class GrossoLocatelliPullanMc
+ {
+ public:
+
+ /// \brief Constants for specifying the node selection rule.
+ ///
+ /// Enum type containing constants for specifying the node selection rule
+ /// for the \ref run() function.
+ ///
+ /// During the algorithm, nodes are selected for addition to the current
+ /// clique according to the applied rule.
+ /// In general, the PENALTY_BASED rule turned out to be the most powerful
+ /// and the most robust, thus it is the default option.
+ /// However, another selection rule can be specified using the \ref run()
+ /// function with the proper parameter.
+ enum SelectionRule {
+
+ /// A node is selected randomly without any evaluation at each step.
+ RANDOM,
+
+ /// A node of maximum degree is selected randomly at each step.
+ DEGREE_BASED,
+
+ /// A node of minimum penalty is selected randomly at each step.
+ /// The node penalties are updated adaptively after each stage of the
+ /// search process.
+ PENALTY_BASED
+ };
+
+ /// \brief Constants for the causes of search termination.
+ ///
+ /// Enum type containing constants for the different causes of search
+ /// termination. The \ref run() function returns one of these values.
+ enum TerminationCause {
+
+ /// The iteration count limit is reached.
+ ITERATION_LIMIT,
+
+ /// The step count limit is reached.
+ STEP_LIMIT,
+
+ /// The clique size limit is reached.
+ SIZE_LIMIT
+ };
+
+ private:
+
+ TEMPLATE_GRAPH_TYPEDEFS(GR);
+
+ typedef std::vector<int> IntVector;
+ typedef std::vector<char> BoolVector;
+ typedef std::vector<BoolVector> BoolMatrix;
+ // Note: vector<char> is used instead of vector<bool> for efficiency reasons
+
+ // The underlying graph
+ const GR &_graph;
+ IntNodeMap _id;
+
+ // Internal matrix representation of the graph
+ BoolMatrix _gr;
+ int _n;
+
+ // Search options
+ bool _delta_based_restart;
+ int _restart_delta_limit;
+
+ // Search limits
+ int _iteration_limit;
+ int _step_limit;
+ int _size_limit;
+
+ // The current clique
+ BoolVector _clique;
+ int _size;
+
+ // The best clique found so far
+ BoolVector _best_clique;
+ int _best_size;
+
+ // The "distances" of the nodes from the current clique.
+ // _delta[u] is the number of nodes in the clique that are
+ // not connected with u.
+ IntVector _delta;
+
+ // The current tabu set
+ BoolVector _tabu;
+
+ // Random number generator
+ Random _rnd;
+
+ private:
+
+ // Implementation of the RANDOM node selection rule.
+ class RandomSelectionRule
+ {
+ private:
+
+ // References to the algorithm instance
+ const BoolVector &_clique;
+ const IntVector &_delta;
+ const BoolVector &_tabu;
+ Random &_rnd;
+
+ // Pivot rule data
+ int _n;
+
+ public:
+
+ // Constructor
+ RandomSelectionRule(GrossoLocatelliPullanMc &mc) :
+ _clique(mc._clique), _delta(mc._delta), _tabu(mc._tabu),
+ _rnd(mc._rnd), _n(mc._n)
+ {}
+
+ // Return a node index for a feasible add move or -1 if no one exists
+ int nextFeasibleAddNode() const {
+ int start_node = _rnd[_n];
+ for (int i = start_node; i != _n; i++) {
+ if (_delta[i] == 0 && !_tabu[i]) return i;
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (_delta[i] == 0 && !_tabu[i]) return i;
+ }
+ return -1;
+ }
+
+ // Return a node index for a feasible swap move or -1 if no one exists
+ int nextFeasibleSwapNode() const {
+ int start_node = _rnd[_n];
+ for (int i = start_node; i != _n; i++) {
+ if (!_clique[i] && _delta[i] == 1 && !_tabu[i]) return i;
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (!_clique[i] && _delta[i] == 1 && !_tabu[i]) return i;
+ }
+ return -1;
+ }
+
+ // Return a node index for an add move or -1 if no one exists
+ int nextAddNode() const {
+ int start_node = _rnd[_n];
+ for (int i = start_node; i != _n; i++) {
+ if (_delta[i] == 0) return i;
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (_delta[i] == 0) return i;
+ }
+ return -1;
+ }
+
+ // Update internal data structures between stages (if necessary)
+ void update() {}
+
+ }; //class RandomSelectionRule
+
+
+ // Implementation of the DEGREE_BASED node selection rule.
+ class DegreeBasedSelectionRule
+ {
+ private:
+
+ // References to the algorithm instance
+ const BoolVector &_clique;
+ const IntVector &_delta;
+ const BoolVector &_tabu;
+ Random &_rnd;
+
+ // Pivot rule data
+ int _n;
+ IntVector _deg;
+
+ public:
+
+ // Constructor
+ DegreeBasedSelectionRule(GrossoLocatelliPullanMc &mc) :
+ _clique(mc._clique), _delta(mc._delta), _tabu(mc._tabu),
+ _rnd(mc._rnd), _n(mc._n), _deg(_n)
+ {
+ for (int i = 0; i != _n; i++) {
+ int d = 0;
+ BoolVector &row = mc._gr[i];
+ for (int j = 0; j != _n; j++) {
+ if (row[j]) d++;
+ }
+ _deg[i] = d;
+ }
+ }
+
+ // Return a node index for a feasible add move or -1 if no one exists
+ int nextFeasibleAddNode() const {
+ int start_node = _rnd[_n];
+ int node = -1, max_deg = -1;
+ for (int i = start_node; i != _n; i++) {
+ if (_delta[i] == 0 && !_tabu[i] && _deg[i] > max_deg) {
+ node = i;
+ max_deg = _deg[i];
+ }
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (_delta[i] == 0 && !_tabu[i] && _deg[i] > max_deg) {
+ node = i;
+ max_deg = _deg[i];
+ }
+ }
+ return node;
+ }
+
+ // Return a node index for a feasible swap move or -1 if no one exists
+ int nextFeasibleSwapNode() const {
+ int start_node = _rnd[_n];
+ int node = -1, max_deg = -1;
+ for (int i = start_node; i != _n; i++) {
+ if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+ _deg[i] > max_deg) {
+ node = i;
+ max_deg = _deg[i];
+ }
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+ _deg[i] > max_deg) {
+ node = i;
+ max_deg = _deg[i];
+ }
+ }
+ return node;
+ }
+
+ // Return a node index for an add move or -1 if no one exists
+ int nextAddNode() const {
+ int start_node = _rnd[_n];
+ int node = -1, max_deg = -1;
+ for (int i = start_node; i != _n; i++) {
+ if (_delta[i] == 0 && _deg[i] > max_deg) {
+ node = i;
+ max_deg = _deg[i];
+ }
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (_delta[i] == 0 && _deg[i] > max_deg) {
+ node = i;
+ max_deg = _deg[i];
+ }
+ }
+ return node;
+ }
+
+ // Update internal data structures between stages (if necessary)
+ void update() {}
+
+ }; //class DegreeBasedSelectionRule
+
+
+ // Implementation of the PENALTY_BASED node selection rule.
+ class PenaltyBasedSelectionRule
+ {
+ private:
+
+ // References to the algorithm instance
+ const BoolVector &_clique;
+ const IntVector &_delta;
+ const BoolVector &_tabu;
+ Random &_rnd;
+
+ // Pivot rule data
+ int _n;
+ IntVector _penalty;
+
+ public:
+
+ // Constructor
+ PenaltyBasedSelectionRule(GrossoLocatelliPullanMc &mc) :
+ _clique(mc._clique), _delta(mc._delta), _tabu(mc._tabu),
+ _rnd(mc._rnd), _n(mc._n), _penalty(_n, 0)
+ {}
+
+ // Return a node index for a feasible add move or -1 if no one exists
+ int nextFeasibleAddNode() const {
+ int start_node = _rnd[_n];
int node = -1, min_p = std::numeric_limits<int>::max();
+ for (int i = start_node; i != _n; i++) {
+ if (_delta[i] == 0 && !_tabu[i] && _penalty[i] < min_p) {
+ node = i;
+ min_p = _penalty[i];
+ }
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (_delta[i] == 0 && !_tabu[i] && _penalty[i] < min_p) {
+ node = i;
+ min_p = _penalty[i];
+ }
+ }
+ return node;
+ }
+
+ // Return a node index for a feasible swap move or -1 if no one exists
+ int nextFeasibleSwapNode() const {
+ int start_node = _rnd[_n];
int node = -1, min_p = std::numeric_limits<int>::max();
+ for (int i = start_node; i != _n; i++) {
+ if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+ _penalty[i] < min_p) {
+ node = i;
+ min_p = _penalty[i];
+ }
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+ _penalty[i] < min_p) {
+ node = i;
+ min_p = _penalty[i];
+ }
+ }
+ return node;
+ }
+
+ // Return a node index for an add move or -1 if no one exists
+ int nextAddNode() const {
+ int start_node = _rnd[_n];
int node = -1, min_p = std::numeric_limits<int>::max();
+ for (int i = start_node; i != _n; i++) {
+ if (_delta[i] == 0 && _penalty[i] < min_p) {
+ node = i;
+ min_p = _penalty[i];
+ }
+ }
+ for (int i = 0; i != start_node; i++) {
+ if (_delta[i] == 0 && _penalty[i] < min_p) {
+ node = i;
+ min_p = _penalty[i];
+ }
+ }
+ return node;
+ }
+
+ // Update internal data structures between stages (if necessary)
+ void update() {}
+
+ }; //class PenaltyBasedSelectionRule
+
+ public:
+
+ /// \brief Constructor.
+ ///
+ /// Constructor.
+ /// The global \ref rnd "random number generator instance" is used
+ /// during the algorithm.
+ ///
+ /// \param graph The undirected graph the algorithm runs on.
+ GrossoLocatelliPullanMc(const GR& graph) :
+ _graph(graph), _id(_graph), _rnd(rnd)
+ {
+ initOptions();
+ }
+
+ /// \brief Constructor with random seed.
+ ///
+ /// Constructor with random seed.
+ ///
+ /// \param graph The undirected graph the algorithm runs on.
+ /// \param seed Seed value for the internal random number generator
+ /// that is used during the algorithm.
+ GrossoLocatelliPullanMc(const GR& graph, int seed) :
+ _graph(graph), _id(_graph), _rnd(seed)
+ {
+ initOptions();
+ }
+
+ /// \brief Constructor with random number generator.
+ ///
+ /// Constructor with random number generator.
+ ///
+ /// \param graph The undirected graph the algorithm runs on.
+ /// \param random A random number generator that is used during the
+ /// algorithm.
+ GrossoLocatelliPullanMc(const GR& graph, const Random& random) :
+ _graph(graph), _id(_graph), _rnd(random)
+ {
+ initOptions();
+ }
+
+ /// \name Execution Control
+ /// The \ref run() function can be used to execute the algorithm.\n
+ /// The functions \ref iterationLimit(int), \ref stepLimit(int), and
+ /// \ref sizeLimit(int) can be used to specify various limits for the
+ /// search process.
+
+ /// @{
+
+ /// \brief Sets the maximum number of iterations.
+ ///
+ /// This function sets the maximum number of iterations.
+ /// Each iteration of the algorithm finds a maximal clique (but not
+ /// necessarily the largest one) by performing several search steps
+ /// (node selections).
+ ///
+ /// This limit controls the running time and the success of the
+ /// algorithm. For larger values, the algorithm runs slower, but it more
+ /// likely finds larger cliques. For smaller values, the algorithm is
+ /// faster but probably gives worse results.
+ ///
+ /// The default value is \c 1000.
+ /// \c -1 means that number of iterations is not limited.
+ ///
+ /// \warning You should specify a reasonable limit for the number of
+ /// iterations and/or the number of search steps.
+ ///
+ /// \return (*this)
+ ///
+ /// \sa stepLimit(int)
+ /// \sa sizeLimit(int)
+ GrossoLocatelliPullanMc& iterationLimit(int limit) {
+ _iteration_limit = limit;
+ return *this;
+ }
+
+ /// \brief Sets the maximum number of search steps.
+ ///
+ /// This function sets the maximum number of elementary search steps.
+ /// Each iteration of the algorithm finds a maximal clique (but not
+ /// necessarily the largest one) by performing several search steps
+ /// (node selections).
+ ///
+ /// This limit controls the running time and the success of the
+ /// algorithm. For larger values, the algorithm runs slower, but it more
+ /// likely finds larger cliques. For smaller values, the algorithm is
+ /// faster but probably gives worse results.
+ ///
+ /// The default value is \c -1, which means that number of steps
+ /// is not limited explicitly. However, the number of iterations is
+ /// limited and each iteration performs a finite number of search steps.
+ ///
+ /// \warning You should specify a reasonable limit for the number of
+ /// iterations and/or the number of search steps.
+ ///
+ /// \return (*this)
+ ///
+ /// \sa iterationLimit(int)
+ /// \sa sizeLimit(int)
+ GrossoLocatelliPullanMc& stepLimit(int limit) {
+ _step_limit = limit;
+ return *this;
+ }
+
+ /// \brief Sets the desired clique size.
+ ///
+ /// This function sets the desired clique size that serves as a search
+ /// limit. If a clique of this size (or a larger one) is found, then the
+ /// algorithm terminates.
+ ///
+ /// This function is especially useful if you know an exact upper bound
+ /// for the size of the cliques in the graph or if any clique above
+ /// a certain size limit is sufficient for your application.
+ ///
+ /// The default value is \c -1, which means that the size limit is set to
+ /// the number of nodes in the graph.
+ ///
+ /// \return (*this)
+ ///
+ /// \sa iterationLimit(int)
+ /// \sa stepLimit(int)
+ GrossoLocatelliPullanMc& sizeLimit(int limit) {
+ _size_limit = limit;
+ return *this;
+ }
+
+ /// \brief The maximum number of iterations.
+ ///
+ /// This function gives back the maximum number of iterations.
+ /// \c -1 means that no limit is specified.
+ ///
+ /// \sa iterationLimit(int)
+ int iterationLimit() const {
+ return _iteration_limit;
+ }
+
+ /// \brief The maximum number of search steps.
+ ///
+ /// This function gives back the maximum number of search steps.
+ /// \c -1 means that no limit is specified.
+ ///
+ /// \sa stepLimit(int)
+ int stepLimit() const {
+ return _step_limit;
+ }
+
+ /// \brief The desired clique size.
+ ///
+ /// This function gives back the desired clique size that serves as a
+ /// search limit. \c -1 means that this limit is set to the number of
+ /// nodes in the graph.
+ ///
+ /// \sa sizeLimit(int)
+ int sizeLimit() const {
+ return _size_limit;
+ }
+
+ /// \brief Runs the algorithm.
+ ///
+ /// This function runs the algorithm. If one of the specified limits
+ /// is reached, the search process terminates.
+ ///
+ /// \param rule The node selection rule. For more information, see
+ /// \ref SelectionRule.
+ ///
+ /// \return The termination cause of the search. For more information,
+ /// see \ref TerminationCause.
+ TerminationCause run(SelectionRule rule = PENALTY_BASED)
+ {
+ init();
+ switch (rule) {
case RANDOM:
return start<RandomSelectionRule>();
case DEGREE_BASED:
return start<DegreeBasedSelectionRule>();
default:
return start<PenaltyBasedSelectionRule>();
+ }
+ }
+
+ /// @}
+
+ /// \name Query Functions
+ /// The results of the algorithm can be obtained using these functions.\n
+ /// The run() function must be called before using them.
+
+ /// @{
+
+ /// \brief The size of the found clique
+ ///
+ /// This function returns the size of the found clique.
+ ///
+ /// \pre run() must be called before using this function.
+ int cliqueSize() const {
+ return _best_size;
+ }
+
+ /// \brief Gives back the found clique in a \c bool node map
+ ///
+ /// This function gives back the characteristic vector of the found
+ /// clique in the given node map.
+ /// It must be a \ref concepts::WriteMap "writable" node map with
+ /// \c bool (or convertible) value type.
+ ///
+ /// \pre run() must be called before using this function.
+ template <typename CliqueMap>
+ void cliqueMap(CliqueMap &map) const {
+ for (NodeIt n(_graph); n != INVALID; ++n) {
map[n] = static_cast<bool>(_best_clique[_id[n]]);
+ }
+ }
+
+ /// \brief Iterator to list the nodes of the found clique
+ ///
+ /// This iterator class lists the nodes of the found clique.
+ /// Before using it, you must allocate a GrossoLocatelliPullanMc instance
+ /// and call its \ref GrossoLocatelliPullanMc::run() "run()" method.
+ ///
+ /// The following example prints out the IDs of the nodes in the found
+ /// clique.
+ /// \code
/// GrossoLocatelliPullanMc<Graph> mc(g);
+ /// mc.run();
/// for (GrossoLocatelliPullanMc<Graph>::CliqueNodeIt n(mc);
+ /// n != INVALID; ++n)
+ /// {
+ /// std::cout << g.id(n) << std::endl;
+ /// }
+ /// \endcode
+ class CliqueNodeIt
+ {
+ private:
+ NodeIt _it;
+ BoolNodeMap _map;
+
+ public:
+
+ /// Constructor
+
+ /// Constructor.
+ /// \param mc The algorithm instance.
+ CliqueNodeIt(const GrossoLocatelliPullanMc &mc)
+ : _map(mc._graph)
+ {
+ mc.cliqueMap(_map);
+ for (_it = NodeIt(mc._graph); _it != INVALID && !_map[_it]; ++_it) ;
+ }
+
+ /// Conversion to \c Node
+ operator Node() const { return _it; }
+
+ bool operator==(Invalid) const { return _it == INVALID; }
+ bool operator!=(Invalid) const { return _it != INVALID; }
+
+ /// Next node
+ CliqueNodeIt &operator++() {
+ for (++_it; _it != INVALID && !_map[_it]; ++_it) ;
+ return *this;
+ }
+
+ /// Postfix incrementation
+
+ /// Postfix incrementation.
+ ///
+ /// \warning This incrementation returns a \c Node, not a
+ /// \c CliqueNodeIt as one may expect.
+ typename GR::Node operator++(int) {
+ Node n=*this;
+ ++(*this);
+ return n;
+ }
+
+ };
+
+ /// @}
+
+ private:
+
+ // Initialize search options and limits
+ void initOptions() {
+ // Search options
+ _delta_based_restart = true;
+ _restart_delta_limit = 4;
+
+ // Search limits
+ _iteration_limit = 1000;
+ _step_limit = -1; // this is disabled by default
+ _size_limit = -1; // this is disabled by default
+ }
+
+ // Adds a node to the current clique
+ void addCliqueNode(int u) {
+ if (_clique[u]) return;
+ _clique[u] = true;
+ _size++;
+ BoolVector &row = _gr[u];
+ for (int i = 0; i != _n; i++) {
+ if (!row[i]) _delta[i]++;
+ }
+ }
+
+ // Removes a node from the current clique
+ void delCliqueNode(int u) {
+ if (!_clique[u]) return;
+ _clique[u] = false;
+ _size--;
+ BoolVector &row = _gr[u];
+ for (int i = 0; i != _n; i++) {
+ if (!row[i]) _delta[i]--;
+ }
+ }
+
+ // Initialize data structures
+ void init() {
+ _n = countNodes(_graph);
+ int ui = 0;
+ for (NodeIt u(_graph); u != INVALID; ++u) {
+ _id[u] = ui++;
+ }
+ _gr.clear();
+ _gr.resize(_n, BoolVector(_n, false));
+ ui = 0;
+ for (NodeIt u(_graph); u != INVALID; ++u) {
+ for (IncEdgeIt e(_graph, u); e != INVALID; ++e) {
+ int vi = _id[_graph.runningNode(e)];
+ _gr[ui][vi] = true;
+ _gr[vi][ui] = true;
+ }
+ ++ui;
+ }
+
+ _clique.clear();
+ _clique.resize(_n, false);
+ _size = 0;
+ _best_clique.clear();
+ _best_clique.resize(_n, false);
+ _best_size = 0;
+ _delta.clear();
+ _delta.resize(_n, 0);
+ _tabu.clear();
+ _tabu.resize(_n, false);
+ }
+
+ // Executes the algorithm
+ template <typename SelectionRuleImpl>
+ TerminationCause start() {
+ if (_n == 0) return SIZE_LIMIT;
+ if (_n == 1) {
+ _best_clique[0] = true;
+ _best_size = 1;
+ return SIZE_LIMIT;
+ }
+
+ // Iterated local search algorithm
+ const int max_size = _size_limit >= 0 ? _size_limit : _n;
+ const int max_restart = _iteration_limit >= 0 ?
_iteration_limit : std::numeric_limits<int>::max();
+ const int max_select = _step_limit >= 0 ?
_step_limit : std::numeric_limits<int>::max();
+
+ SelectionRuleImpl sel_method(*this);
+ int select = 0, restart = 0;
+ IntVector restart_nodes;
+ while (select < max_select && restart < max_restart) {
+
+ // Perturbation/restart
+ restart++;
+ if (_delta_based_restart) {
+ restart_nodes.clear();
+ for (int i = 0; i != _n; i++) {
+ if (_delta[i] >= _restart_delta_limit)
+ restart_nodes.push_back(i);
+ }
+ }
+ int rs_node = -1;
+ if (restart_nodes.size() > 0) {
+ rs_node = restart_nodes[_rnd[restart_nodes.size()]];
+ } else {
+ rs_node = _rnd[_n];
+ }
+ BoolVector &row = _gr[rs_node];
+ for (int i = 0; i != _n; i++) {
+ if (_clique[i] && !row[i]) delCliqueNode(i);
+ }
+ addCliqueNode(rs_node);
+
+ // Local search
+ _tabu.clear();
+ _tabu.resize(_n, false);
+ bool tabu_empty = true;
+ int max_swap = _size;
+ while (select < max_select) {
+ select++;
+ int u;
+ if ((u = sel_method.nextFeasibleAddNode()) != -1) {
+ // Feasible add move
+ addCliqueNode(u);
+ if (tabu_empty) max_swap = _size;
+ }
+ else if ((u = sel_method.nextFeasibleSwapNode()) != -1) {
+ // Feasible swap move
+ int v = -1;
+ BoolVector &row = _gr[u];
+ for (int i = 0; i != _n; i++) {
+ if (_clique[i] && !row[i]) {
+ v = i;
+ break;
+ }
+ }
+ addCliqueNode(u);
+ delCliqueNode(v);
+ _tabu[v] = true;
+ tabu_empty = false;
+ if (--max_swap <= 0) break;
+ }
+ else if ((u = sel_method.nextAddNode()) != -1) {
+ // Non-feasible add move
+ addCliqueNode(u);
+ }
+ else break;
+ }
+ if (_size > _best_size) {
+ _best_clique = _clique;
+ _best_size = _size;
+ if (_best_size >= max_size) return SIZE_LIMIT;
+ }
+ sel_method.update();
+ }
+
+ return (restart >= max_restart ? ITERATION_LIMIT : STEP_LIMIT);
+ }
+
+ }; //class GrossoLocatelliPullanMc
+
+ ///@}
+
+} //namespace lemon
+
+#endif //LEMON_GROSSO_LOCATELLI_PULLAN_MC_H
Index: lemon/hao_orlin.h
===================================================================
--- lemon/hao_orlin.h (revision 956)
+++ lemon/hao_orlin.h (revision 1019)
@@ -54,6 +54,6 @@
/// preflow push-relabel algorithm. Our implementation calculates
/// the minimum cut in \f$ O(n^2\sqrt{m}) \f$ time (we use the
- /// highest-label rule), or in \f$O(nm)\f$ for unit capacities. The
- /// purpose of such algorithm is e.g. testing network reliability.
+ /// highest-label rule), or in \f$O(nm)\f$ for unit capacities. A notable
+ /// use of this algorithm is testing network reliability.
///
/// For an undirected graph you can run just the first phase of the
@@ -913,4 +913,6 @@
/// source-side (i.e. a set \f$ X\subsetneq V \f$ with
/// \f$ source \in X \f$ and minimal outgoing capacity).
+ /// It updates the stored cut if (and only if) the newly found one
+ /// is better.
///
/// \pre \ref init() must be called before using this function.
@@ -925,4 +927,6 @@
/// sink-side (i.e. a set \f$ X\subsetneq V \f$ with
/// \f$ source \notin X \f$ and minimal outgoing capacity).
+ /// It updates the stored cut if (and only if) the newly found one
+ /// is better.
///
/// \pre \ref init() must be called before using this function.
@@ -934,6 +938,6 @@
/// \brief Run the algorithm.
///
- /// This function runs the algorithm. It finds nodes \c source and
- /// \c target arbitrarily and then calls \ref init(), \ref calculateOut()
+ /// This function runs the algorithm. It chooses source node,
+ /// then calls \ref init(), \ref calculateOut()
/// and \ref calculateIn().
void run() {
@@ -945,7 +949,7 @@
/// \brief Run the algorithm.
///
- /// This function runs the algorithm. It uses the given \c source node,
- /// finds a proper \c target node and then calls the \ref init(),
- /// \ref calculateOut() and \ref calculateIn().
+ /// This function runs the algorithm. It calls \ref init(),
+ /// \ref calculateOut() and \ref calculateIn() with the given
+ /// source node.
void run(const Node& s) {
init(s);
@@ -966,5 +970,7 @@
/// \brief Return the value of the minimum cut.
///
- /// This function returns the value of the minimum cut.
+ /// This function returns the value of the best cut found by the
+ /// previously called \ref run(), \ref calculateOut() or \ref
+ /// calculateIn().
///
/// \pre \ref run(), \ref calculateOut() or \ref calculateIn()
@@ -977,7 +983,11 @@
/// \brief Return a minimum cut.
///
- /// This function sets \c cutMap to the characteristic vector of a
- /// minimum value cut: it will give a non-empty set \f$ X\subsetneq V \f$
- /// with minimal outgoing capacity (i.e. \c cutMap will be \c true exactly
+ /// This function gives the best cut found by the
+ /// previously called \ref run(), \ref calculateOut() or \ref
+ /// calculateIn().
+ ///
+ /// It sets \c cutMap to the characteristic vector of the found
+ /// minimum value cut - a non-empty set \f$ X\subsetneq V \f$
+ /// of minimum outgoing capacity (i.e. \c cutMap will be \c true exactly
/// for the nodes of \f$ X \f$).
///
Index: lemon/kruskal.h
===================================================================
--- lemon/kruskal.h (revision 631)
+++ lemon/kruskal.h (revision 1025)
@@ -31,7 +31,4 @@
///\file
///\brief Kruskal's algorithm to compute a minimum cost spanning tree
-///
-///Kruskal's algorithm to compute a minimum cost spanning tree.
-///
namespace lemon {
Index: lemon/max_cardinality_search.h
===================================================================
--- lemon/max_cardinality_search.h (revision 1020)
+++ lemon/max_cardinality_search.h (revision 1020)
@@ -0,0 +1,786 @@
+/* -*- C++ -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_MAX_CARDINALITY_SEARCH_H
+#define LEMON_MAX_CARDINALITY_SEARCH_H
+
+
+/// \ingroup search
+/// \file
+/// \brief Maximum cardinality search in undirected digraphs.
+
+#include <lemon/bin_heap.h>
+#include <lemon/bucket_heap.h>
+
+#include <lemon/error.h>
+#include <lemon/maps.h>
+
+#include <functional>
+
+namespace lemon {
+
+ /// \brief Default traits class of MaxCardinalitySearch class.
+ ///
+ /// Default traits class of MaxCardinalitySearch class.
+ /// \param Digraph Digraph type.
+ /// \param CapacityMap Type of capacity map.
+ template <typename GR, typename CM>
+ struct MaxCardinalitySearchDefaultTraits {
+ /// The digraph type the algorithm runs on.
+ typedef GR Digraph;
+
+ template <typename CM>
+ struct CapMapSelector {
+
+ typedef CM CapacityMap;
+
+ static CapacityMap *createCapacityMap(const Digraph& g) {
+ return new CapacityMap(g);
+ }
+ };
+
+ template <typename V>
+ struct CapMapSelector<ConstMap<typename Digraph::Arc, Const<V, 1> > > {
+
+ typedef ConstMap<typename Digraph::Arc, Const<V, 1> > CapacityMap;
+
+ static CapacityMap *createCapacityMap(const Digraph&) {
+ return new CapacityMap;
+ }
+ };
+
+ /// \brief The type of the map that stores the arc capacities.
+ ///
+ /// The type of the map that stores the arc capacities.
+ /// It must meet the \ref concepts::ReadMap "ReadMap" concept.
+ typedef typename CapMapSelector<CM>::CapacityMap CapacityMap;
+
+ /// \brief The type of the capacity of the arcs.
+ typedef typename CapacityMap::Value Value;
+
+ /// \brief Instantiates a CapacityMap.
+ ///
+ /// This function instantiates a \ref CapacityMap.
+ /// \param digraph is the digraph, to which we would like to define
+ /// the CapacityMap.
+ static CapacityMap *createCapacityMap(const Digraph& digraph) {
+ return CapMapSelector<CapacityMap>::createCapacityMap(digraph);
+ }
+
+ /// \brief The cross reference type used by heap.
+ ///
+ /// The cross reference type used by heap.
+ /// Usually it is \c Digraph::NodeMap.
+ typedef typename Digraph::template NodeMap<int> HeapCrossRef;
+
+ /// \brief Instantiates a HeapCrossRef.
+ ///
+ /// This function instantiates a \ref HeapCrossRef.
+ /// \param digraph is the digraph, to which we would like to define the
+ /// HeapCrossRef.
+ static HeapCrossRef *createHeapCrossRef(const Digraph &digraph) {
+ return new HeapCrossRef(digraph);
+ }
+
+ template <typename CapacityMap>
+ struct HeapSelector {
+ template <typename Value, typename Ref>
+ struct Selector {
+ typedef BinHeap<Value, Ref, std::greater<Value> > Heap;
+ };
+ };
+
+ template <typename CapacityKey>
+ struct HeapSelector<ConstMap<CapacityKey, Const<int, 1> > > {
+ template <typename Value, typename Ref>
+ struct Selector {
+ typedef BucketHeap<Ref, false> Heap;
+ };
+ };
+
+ /// \brief The heap type used by MaxCardinalitySearch algorithm.
+ ///
+ /// The heap type used by MaxCardinalitySearch algorithm. It should
+ /// maximize the priorities. The default heap type is
+ /// the \ref BinHeap, but it is specialized when the
+ /// CapacityMap is ConstMap<CapacityKey, Const<int, 1> >
+ /// to BucketHeap.
+ ///
+ /// \sa MaxCardinalitySearch
+ typedef typename HeapSelector<CapacityMap>
+ ::template Selector<Value, HeapCrossRef>
+ ::Heap Heap;
+
+ /// \brief Instantiates a Heap.
+ ///
+ /// This function instantiates a \ref Heap.
+ /// \param crossref The cross reference of the heap.
+ static Heap *createHeap(HeapCrossRef& crossref) {
+ return new Heap(crossref);
+ }
+
+ /// \brief The type of the map that stores whether a node is processed.
+ ///
+ /// The type of the map that stores whether a node is processed.
+ /// It must meet the \ref concepts::WriteMap "WriteMap" concept.
+ /// By default it is a NullMap.
+ typedef NullMap<typename Digraph::Node, bool> ProcessedMap;
+
+ /// \brief Instantiates a ProcessedMap.
+ ///
+ /// This function instantiates a \ref ProcessedMap.
+ /// \param digraph is the digraph, to which
+ /// we would like to define the \ref ProcessedMap
+#ifdef DOXYGEN
+ static ProcessedMap *createProcessedMap(const Digraph &digraph)
+#else
+ static ProcessedMap *createProcessedMap(const Digraph &)
+#endif
+ {
+ return new ProcessedMap();
+ }
+
+ /// \brief The type of the map that stores the cardinalities of the nodes.
+ ///
+ /// The type of the map that stores the cardinalities of the nodes.
+ /// It must meet the \ref concepts::WriteMap "WriteMap" concept.
+ typedef typename Digraph::template NodeMap<Value> CardinalityMap;
+
+ /// \brief Instantiates a CardinalityMap.
+ ///
+ /// This function instantiates a \ref CardinalityMap.
+ /// \param digraph is the digraph, to which we would like to define the \ref
+ /// CardinalityMap
+ static CardinalityMap *createCardinalityMap(const Digraph &digraph) {
+ return new CardinalityMap(digraph);
+ }
+
+
+ };
+
+ /// \ingroup search
+ ///
+ /// \brief Maximum Cardinality Search algorithm class.
+ ///
+ /// This class provides an efficient implementation of Maximum Cardinality
+ /// Search algorithm. The maximum cardinality search first chooses any
+ /// node of the digraph. Then every time it chooses one unprocessed node
+ /// with maximum cardinality, i.e. the sum of capacities on out arcs to the nodes
+ /// which were previously processed.
+ /// If there is a cut in the digraph the algorithm should choose
+ /// again any unprocessed node of the digraph.
+
+ /// The arc capacities are passed to the algorithm using a
+ /// \ref concepts::ReadMap "ReadMap", so it is easy to change it to any
+ /// kind of capacity.
+ ///
+ /// The type of the capacity is determined by the \ref
+ /// concepts::ReadMap::Value "Value" of the capacity map.
+ ///
+ /// It is also possible to change the underlying priority heap.
+ ///
+ ///
+ /// \param GR The digraph type the algorithm runs on. The value of
+ /// Digraph is not used directly by the search algorithm, it
+ /// is only passed to \ref MaxCardinalitySearchDefaultTraits.
+ /// \param CAP This read-only ArcMap determines the capacities of
+ /// the arcs. It is read once for each arc, so the map may involve in
+ /// relatively time consuming process to compute the arc capacity if
+ /// it is necessary. The default map type is \ref
+ /// ConstMap "ConstMap<Arc, Const<int, 1> >". The value
+ /// of CapacityMap is not used directly by search algorithm, it is only
+ /// passed to \ref MaxCardinalitySearchDefaultTraits.
+ /// \param TR Traits class to set various data types used by the
+ /// algorithm. The default traits class is
+ /// \ref MaxCardinalitySearchDefaultTraits
+ /// "MaxCardinalitySearchDefaultTraits".
+ /// See \ref MaxCardinalitySearchDefaultTraits
+ /// for the documentation of a MaxCardinalitySearch traits class.
+
+#ifdef DOXYGEN
+ template <typename GR, typename CAP, typename TR>
+#else
+ template <typename GR, typename CAP = ConstMap<typename GR::Arc, Const<int, 1> >,
+ typename TR =
+ MaxCardinalitySearchDefaultTraits<GR, CAP> >
+#endif
+ class MaxCardinalitySearch {
+ public:
+
+ typedef TR Traits;
+ ///The type of the underlying digraph.
+ typedef typename Traits::Digraph Digraph;
+
+ ///The type of the capacity of the arcs.
+ typedef typename Traits::CapacityMap::Value Value;
+ ///The type of the map that stores the arc capacities.
+ typedef typename Traits::CapacityMap CapacityMap;
+ ///The type of the map indicating if a node is processed.
+ typedef typename Traits::ProcessedMap ProcessedMap;
+ ///The type of the map that stores the cardinalities of the nodes.
+ typedef typename Traits::CardinalityMap CardinalityMap;
+ ///The cross reference type used for the current heap.
+ typedef typename Traits::HeapCrossRef HeapCrossRef;
+ ///The heap type used by the algorithm. It maximizes the priorities.
+ typedef typename Traits::Heap Heap;
+ private:
+ // Pointer to the underlying digraph.
+ const Digraph *_graph;
+ // Pointer to the capacity map
+ const CapacityMap *_capacity;
+ // Indicates if \ref _capacity is locally allocated (\c true) or not.
+ bool local_capacity;
+ // Pointer to the map of cardinality.
+ CardinalityMap *_cardinality;
+ // Indicates if \ref _cardinality is locally allocated (\c true) or not.
+ bool local_cardinality;
+ // Pointer to the map of processed status of the nodes.
+ ProcessedMap *_processed;
+ // Indicates if \ref _processed is locally allocated (\c true) or not.
+ bool local_processed;
+ // Pointer to the heap cross references.
+ HeapCrossRef *_heap_cross_ref;
+ // Indicates if \ref _heap_cross_ref is locally allocated (\c true) or not.
+ bool local_heap_cross_ref;
+ // Pointer to the heap.
+ Heap *_heap;
+ // Indicates if \ref _heap is locally allocated (\c true) or not.
+ bool local_heap;
+
+ public :
+
+ typedef MaxCardinalitySearch Create;
+
+ ///\name Named template parameters
+
+ ///@{
+
+ template <typename T>
+ struct DefCapacityMapTraits : public Traits {
+ typedef T CapacityMap;
+ static CapacityMap *createCapacityMap(const Digraph &) {
+ LEMON_ASSERT(false,"Uninitialized parameter.");
+ return 0;
+ }
+ };
+ /// \brief \ref named-templ-param "Named parameter" for setting
+ /// CapacityMap type
+ ///
+ /// \ref named-templ-param "Named parameter" for setting CapacityMap type
+ /// for the algorithm.
+ template <typename T>
+ struct SetCapacityMap
+ : public MaxCardinalitySearch<Digraph, T, DefCapacityMapTraits<T> > {
+ typedef MaxCardinalitySearch<Digraph, T, DefCapacityMapTraits<T> > Create;
+ };
+
+ template <typename T>
+ struct DefCardinalityMapTraits : public Traits {
+ typedef T CardinalityMap;
+ static CardinalityMap *createCardinalityMap(const Digraph &)
+ {
+ LEMON_ASSERT(false,"Uninitialized parameter.");
+ return 0;
+ }
+ };
+ /// \brief \ref named-templ-param "Named parameter" for setting
+ /// CardinalityMap type
+ ///
+ /// \ref named-templ-param "Named parameter" for setting CardinalityMap
+ /// type for the algorithm.
+ template <typename T>
+ struct SetCardinalityMap
+ : public MaxCardinalitySearch<Digraph, CapacityMap, DefCardinalityMapTraits<T> > {
+ typedef MaxCardinalitySearch<Digraph, CapacityMap, DefCardinalityMapTraits<T> > Create;
+ };
+
+ template <typename T>
+ struct DefProcessedMapTraits : public Traits {
+ typedef T ProcessedMap;
+ static ProcessedMap *createProcessedMap(const Digraph &) {
+ LEMON_ASSERT(false,"Uninitialized parameter.");
+ return 0;
+ }
+ };
+ /// \brief \ref named-templ-param "Named parameter" for setting
+ /// ProcessedMap type
+ ///
+ /// \ref named-templ-param "Named parameter" for setting ProcessedMap type
+ /// for the algorithm.
+ template <typename T>
+ struct SetProcessedMap
+ : public MaxCardinalitySearch<Digraph, CapacityMap, DefProcessedMapTraits<T> > {
+ typedef MaxCardinalitySearch<Digraph, CapacityMap, DefProcessedMapTraits<T> > Create;
+ };
+
+ template <typename H, typename CR>
+ struct DefHeapTraits : public Traits {
+ typedef CR HeapCrossRef;
+ typedef H Heap;
+ static HeapCrossRef *createHeapCrossRef(const Digraph &) {
+ LEMON_ASSERT(false,"Uninitialized parameter.");
+ return 0;
+ }
+ static Heap *createHeap(HeapCrossRef &) {
+ LEMON_ASSERT(false,"Uninitialized parameter.");
+ return 0;
+ }
+ };
+ /// \brief \ref named-templ-param "Named parameter" for setting heap
+ /// and cross reference type
+ ///
+ /// \ref named-templ-param "Named parameter" for setting heap and cross
+ /// reference type for the algorithm.
+ template <typename H, typename CR = typename Digraph::template NodeMap<int> >
+ struct SetHeap
+ : public MaxCardinalitySearch<Digraph, CapacityMap, DefHeapTraits<H, CR> > {
+ typedef MaxCardinalitySearch< Digraph, CapacityMap,
+ DefHeapTraits<H, CR> > Create;
+ };
+
+ template <typename H, typename CR>
+ struct DefStandardHeapTraits : public Traits {
+ typedef CR HeapCrossRef;
+ typedef H Heap;
+ static HeapCrossRef *createHeapCrossRef(const Digraph &digraph) {
+ return new HeapCrossRef(digraph);
+ }
+ static Heap *createHeap(HeapCrossRef &crossref) {
+ return new Heap(crossref);
+ }
+ };
+
+ /// \brief \ref named-templ-param "Named parameter" for setting heap and
+ /// cross reference type with automatic allocation
+ ///
+ /// \ref named-templ-param "Named parameter" for setting heap and cross
+ /// reference type. It can allocate the heap and the cross reference
+ /// object if the cross reference's constructor waits for the digraph as
+ /// parameter and the heap's constructor waits for the cross reference.
+ template <typename H, typename CR = typename Digraph::template NodeMap<int> >
+ struct SetStandardHeap
+ : public MaxCardinalitySearch<Digraph, CapacityMap, DefStandardHeapTraits<H, CR> > {
+ typedef MaxCardinalitySearch<Digraph, CapacityMap, DefStandardHeapTraits<H, CR> >
+ Create;
+ };
+
+ ///@}
+
+
+ protected:
+
+ MaxCardinalitySearch() {}
+
+ public:
+
+ /// \brief Constructor.
+ ///
+ ///\param digraph the digraph the algorithm will run on.
+ ///\param capacity the capacity map used by the algorithm.
+ ///When no capacity map given, a constant 1 capacity map will
+ ///be allocated.
+#ifdef DOXYGEN
+ MaxCardinalitySearch(const Digraph& digraph,
+ const CapacityMap& capacity=0 ) :
+#else
+ MaxCardinalitySearch(const Digraph& digraph,
+ const CapacityMap& capacity=*static_cast<CapacityMap*>(0) ) :
+#endif
+ _graph(&digraph),
+ _capacity(&capacity), local_capacity(false),
+ _cardinality(0), local_cardinality(false),
+ _processed(0), local_processed(false),
+ _heap_cross_ref(0), local_heap_cross_ref(false),
+ _heap(0), local_heap(false)
+ { }
+
+ /// \brief Destructor.
+ ~MaxCardinalitySearch() {
+ if(local_capacity) delete _capacity;
+ if(local_cardinality) delete _cardinality;
+ if(local_processed) delete _processed;
+ if(local_heap_cross_ref) delete _heap_cross_ref;
+ if(local_heap) delete _heap;
+ }
+
+ /// \brief Sets the capacity map.
+ ///
+ /// Sets the capacity map.
+ /// \return (*this)
+ MaxCardinalitySearch &capacityMap(const CapacityMap &m) {
+ if (local_capacity) {
+ delete _capacity;
+ local_capacity=false;
+ }
+ _capacity=&m;
+ return *this;
+ }
+
+ /// \brief Returns a const reference to the capacity map.
+ ///
+ /// Returns a const reference to the capacity map used by
+ /// the algorithm.
+ const CapacityMap &capacityMap() const {
+ return *_capacity;
+ }
+
+ /// \brief Sets the map storing the cardinalities calculated by the
+ /// algorithm.
+ ///
+ /// Sets the map storing the cardinalities calculated by the algorithm.
+ /// If you don't use this function before calling \ref run(),
+ /// it will allocate one. The destructor deallocates this
+ /// automatically allocated map, of course.
+ /// \return (*this)
+ MaxCardinalitySearch &cardinalityMap(CardinalityMap &m) {
+ if(local_cardinality) {
+ delete _cardinality;
+ local_cardinality=false;
+ }
+ _cardinality = &m;
+ return *this;
+ }
+
+ /// \brief Sets the map storing the processed nodes.
+ ///
+ /// Sets the map storing the processed nodes.
+ /// If you don't use this function before calling \ref run(),
+ /// it will allocate one. The destructor deallocates this
+ /// automatically allocated map, of course.
+ /// \return (*this)
+ MaxCardinalitySearch &processedMap(ProcessedMap &m)
+ {
+ if(local_processed) {
+ delete _processed;
+ local_processed=false;
+ }
+ _processed = &m;
+ return *this;
+ }
+
+ /// \brief Returns a const reference to the cardinality map.
+ ///
+ /// Returns a const reference to the cardinality map used by
+ /// the algorithm.
+ const ProcessedMap &processedMap() const {
+ return *_processed;
+ }
+
+ /// \brief Sets the heap and the cross reference used by algorithm.
+ ///
+ /// Sets the heap and the cross reference used by algorithm.
+ /// If you don't use this function before calling \ref run(),
+ /// it will allocate one. The destructor deallocates this
+ /// automatically allocated map, of course.
+ /// \return (*this)
+ MaxCardinalitySearch &heap(Heap& hp, HeapCrossRef &cr) {
+ if(local_heap_cross_ref) {
+ delete _heap_cross_ref;
+ local_heap_cross_ref = false;
+ }
+ _heap_cross_ref = &cr;
+ if(local_heap) {
+ delete _heap;
+ local_heap = false;
+ }
+ _heap = &hp;
+ return *this;
+ }
+
+ /// \brief Returns a const reference to the heap.
+ ///
+ /// Returns a const reference to the heap used by
+ /// the algorithm.
+ const Heap &heap() const {
+ return *_heap;
+ }
+
+ /// \brief Returns a const reference to the cross reference.
+ ///
+ /// Returns a const reference to the cross reference
+ /// of the heap.
+ const HeapCrossRef &heapCrossRef() const {
+ return *_heap_cross_ref;
+ }
+
+ private:
+
+ typedef typename Digraph::Node Node;
+ typedef typename Digraph::NodeIt NodeIt;
+ typedef typename Digraph::Arc Arc;
+ typedef typename Digraph::InArcIt InArcIt;
+
+ void create_maps() {
+ if(!_capacity) {
+ local_capacity = true;
+ _capacity = Traits::createCapacityMap(*_graph);
+ }
+ if(!_cardinality) {
+ local_cardinality = true;
+ _cardinality = Traits::createCardinalityMap(*_graph);
+ }
+ if(!_processed) {
+ local_processed = true;
+ _processed = Traits::createProcessedMap(*_graph);
+ }
+ if (!_heap_cross_ref) {
+ local_heap_cross_ref = true;
+ _heap_cross_ref = Traits::createHeapCrossRef(*_graph);
+ }
+ if (!_heap) {
+ local_heap = true;
+ _heap = Traits::createHeap(*_heap_cross_ref);
+ }
+ }
+
+ void finalizeNodeData(Node node, Value capacity) {
+ _processed->set(node, true);
+ _cardinality->set(node, capacity);
+ }
+
+ public:
+ /// \name Execution control
+ /// The simplest way to execute the algorithm is to use
+ /// one of the member functions called \ref run().
+ /// \n
+ /// If you need more control on the execution,
+ /// first you must call \ref init(), then you can add several source nodes
+ /// with \ref addSource().
+ /// Finally \ref start() will perform the computation.
+
+ ///@{
+
+ /// \brief Initializes the internal data structures.
+ ///
+ /// Initializes the internal data structures, and clears the heap.
+ void init() {
+ create_maps();
+ _heap->clear();
+ for (NodeIt it(*_graph) ; it != INVALID ; ++it) {
+ _processed->set(it, false);
+ _heap_cross_ref->set(it, Heap::PRE_HEAP);
+ }
+ }
+
+ /// \brief Adds a new source node.
+ ///
+ /// Adds a new source node to the priority heap.
+ ///
+ /// It checks if the node has not yet been added to the heap.
+ void addSource(Node source, Value capacity = 0) {
+ if(_heap->state(source) == Heap::PRE_HEAP) {
+ _heap->push(source, capacity);
+ }
+ }
+
+ /// \brief Processes the next node in the priority heap
+ ///
+ /// Processes the next node in the priority heap.
+ ///
+ /// \return The processed node.
+ ///
+ /// \warning The priority heap must not be empty!
+ Node processNextNode() {
+ Node node = _heap->top();
+ finalizeNodeData(node, _heap->prio());
+ _heap->pop();
+
+ for (InArcIt it(*_graph, node); it != INVALID; ++it) {
+ Node source = _graph->source(it);
+ switch (_heap->state(source)) {
+ case Heap::PRE_HEAP:
+ _heap->push(source, (*_capacity)[it]);
+ break;
+ case Heap::IN_HEAP:
+ _heap->decrease(source, (*_heap)[source] + (*_capacity)[it]);
+ break;
+ case Heap::POST_HEAP:
+ break;
+ }
+ }
+ return node;
+ }
+
+ /// \brief Next node to be processed.
+ ///
+ /// Next node to be processed.
+ ///
+ /// \return The next node to be processed or INVALID if the
+ /// priority heap is empty.
+ Node nextNode() {
+ return !_heap->empty() ? _heap->top() : INVALID;
+ }
+
+ /// \brief Returns \c false if there are nodes
+ /// to be processed in the priority heap
+ ///
+ /// Returns \c false if there are nodes
+ /// to be processed in the priority heap
+ bool emptyQueue() { return _heap->empty(); }
+ /// \brief Returns the number of the nodes to be processed
+ /// in the priority heap
+ ///
+ /// Returns the number of the nodes to be processed in the priority heap
+ int emptySize() { return _heap->size(); }
+
+ /// \brief Executes the algorithm.
+ ///
+ /// Executes the algorithm.
+ ///
+ ///\pre init() must be called and at least one node should be added
+ /// with addSource() before using this function.
+ ///
+ /// This method runs the Maximum Cardinality Search algorithm from the
+ /// source node(s).
+ void start() {
+ while ( !_heap->empty() ) processNextNode();
+ }
+
+ /// \brief Executes the algorithm until \c dest is reached.
+ ///
+ /// Executes the algorithm until \c dest is reached.
+ ///
+ /// \pre init() must be called and at least one node should be added
+ /// with addSource() before using this function.
+ ///
+ /// This method runs the %MaxCardinalitySearch algorithm from the source
+ /// nodes.
+ void start(Node dest) {
+ while ( !_heap->empty() && _heap->top()!=dest ) processNextNode();
+ if ( !_heap->empty() ) finalizeNodeData(_heap->top(), _heap->prio());
+ }
+
+ /// \brief Executes the algorithm until a condition is met.
+ ///
+ /// Executes the algorithm until a condition is met.
+ ///
+ /// \pre init() must be called and at least one node should be added
+ /// with addSource() before using this function.
+ ///
+ /// \param nm must be a bool (or convertible) node map. The algorithm
+ /// will stop when it reaches a node \c v with nm[v]==true.
+ template <typename NodeBoolMap>
+ void start(const NodeBoolMap &nm) {
+ while ( !_heap->empty() && !nm[_heap->top()] ) processNextNode();
+ if ( !_heap->empty() ) finalizeNodeData(_heap->top(),_heap->prio());
+ }
+
+ /// \brief Runs the maximum cardinality search algorithm from node \c s.
+ ///
+ /// This method runs the %MaxCardinalitySearch algorithm from a root
+ /// node \c s.
+ ///
+ ///\note d.run(s) is just a shortcut of the following code.
+ ///\code
+ /// d.init();
+ /// d.addSource(s);
+ /// d.start();
+ ///\endcode
+ void run(Node s) {
+ init();
+ addSource(s);
+ start();
+ }
+
+ /// \brief Runs the maximum cardinality search algorithm for the
+ /// whole digraph.
+ ///
+ /// This method runs the %MaxCardinalitySearch algorithm from all
+ /// unprocessed node of the digraph.
+ ///
+ ///\note d.run(s) is just a shortcut of the following code.
+ ///\code
+ /// d.init();
+ /// for (NodeIt it(digraph); it != INVALID; ++it) {
+ /// if (!d.reached(it)) {
+ /// d.addSource(s);
+ /// d.start();
+ /// }
+ /// }
+ ///\endcode
+ void run() {
+ init();
+ for (NodeIt it(*_graph); it != INVALID; ++it) {
+ if (!reached(it)) {
+ addSource(it);
+ start();
+ }
+ }
+ }
+
+ ///@}
+
+ /// \name Query Functions
+ /// The results of the maximum cardinality search algorithm can be
+ /// obtained using these functions.
+ /// \n
+ /// Before the use of these functions, either run() or start() must be
+ /// called.
+
+ ///@{
+
+ /// \brief The cardinality of a node.
+ ///
+ /// Returns the cardinality of a node.
+ /// \pre \ref run() must be called before using this function.
+ /// \warning If node \c v is unreachable from the root the return value
+ /// of this function is undefined.
+ Value cardinality(Node node) const { return (*_cardinality)[node]; }
+
+ /// \brief The current cardinality of a node.
+ ///
+ /// Returns the current cardinality of a node.
+ /// \pre the given node should be reached but not processed
+ Value currentCardinality(Node node) const { return (*_heap)[node]; }
+
+ /// \brief Returns a reference to the NodeMap of cardinalities.
+ ///
+ /// Returns a reference to the NodeMap of cardinalities. \pre \ref run()
+ /// must be called before using this function.
+ const CardinalityMap &cardinalityMap() const { return *_cardinality;}
+
+ /// \brief Checks if a node is reachable from the root.
+ ///
+ /// Returns \c true if \c v is reachable from the root.
+ /// \warning The source nodes are initialized as unreached.
+ /// \pre \ref run() must be called before using this function.
+ bool reached(Node v) { return (*_heap_cross_ref)[v] != Heap::PRE_HEAP; }
+
+ /// \brief Checks if a node is processed.
+ ///
+ /// Returns \c true if \c v is processed, i.e. the shortest
+ /// path to \c v has already been found.
+ /// \pre \ref run() must be called before using this function.
+ bool processed(Node v) { return (*_heap_cross_ref)[v] == Heap::POST_HEAP; }
+
+ ///@}
+ };
+
+}
+
+#endif
Index: lemon/nagamochi_ibaraki.h
===================================================================
--- lemon/nagamochi_ibaraki.h (revision 1017)
+++ lemon/nagamochi_ibaraki.h (revision 1017)
@@ -0,0 +1,697 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_NAGAMOCHI_IBARAKI_H
+#define LEMON_NAGAMOCHI_IBARAKI_H
+
+
+/// \ingroup min_cut
+/// \file
+/// \brief Implementation of the Nagamochi-Ibaraki algorithm.
+
+#include <lemon/list_graph.h>
+#include <lemon/bin_heap.h>
+#include <lemon/bucket_heap.h>
+#include <lemon/maps.h>
+#include <lemon/radix_sort.h>
+#include <lemon/unionfind.h>
+
+#include <cassert>
+
+namespace lemon {
+
+ /// \brief Default traits class for NagamochiIbaraki class.
+ ///
+ /// Default traits class for NagamochiIbaraki class.
+ /// \param GR The undirected graph type.
+ /// \param CM Type of capacity map.
+ template <typename GR, typename CM>
+ struct NagamochiIbarakiDefaultTraits {
+ /// The type of the capacity map.
+ typedef typename CM::Value Value;
+
+ /// The undirected graph type the algorithm runs on.
+ typedef GR Graph;
+
+ /// \brief The type of the map that stores the edge capacities.
+ ///
+ /// The type of the map that stores the edge capacities.
+ /// It must meet the \ref concepts::ReadMap "ReadMap" concept.
+ typedef CM CapacityMap;
+
+ /// \brief Instantiates a CapacityMap.
+ ///
+ /// This function instantiates a \ref CapacityMap.
+#ifdef DOXYGEN
+ static CapacityMap *createCapacityMap(const Graph& graph)
+#else
+ static CapacityMap *createCapacityMap(const Graph&)
+#endif
+ {
+ LEMON_ASSERT(false, "CapacityMap is not initialized");
+ return 0; // ignore warnings
+ }
+
+ /// \brief The cross reference type used by heap.
+ ///
+ /// The cross reference type used by heap.
+ /// Usually \c Graph::NodeMap.
+ typedef typename Graph::template NodeMap<int> HeapCrossRef;
+
+ /// \brief Instantiates a HeapCrossRef.
+ ///
+ /// This function instantiates a \ref HeapCrossRef.
+ /// \param g is the graph, to which we would like to define the
+ /// \ref HeapCrossRef.
+ static HeapCrossRef *createHeapCrossRef(const Graph& g) {
+ return new HeapCrossRef(g);
+ }
+
+ /// \brief The heap type used by NagamochiIbaraki algorithm.
+ ///
+ /// The heap type used by NagamochiIbaraki algorithm. It has to
+ /// maximize the priorities.
+ ///
+ /// \sa BinHeap
+ /// \sa NagamochiIbaraki
+ typedef BinHeap<Value, HeapCrossRef, std::greater<Value> > Heap;
+
+ /// \brief Instantiates a Heap.
+ ///
+ /// This function instantiates a \ref Heap.
+ /// \param r is the cross reference of the heap.
+ static Heap *createHeap(HeapCrossRef& r) {
+ return new Heap(r);
+ }
+ };
+
+ /// \ingroup min_cut
+ ///
+ /// \brief Calculates the minimum cut in an undirected graph.
+ ///
+ /// Calculates the minimum cut in an undirected graph with the
+ /// Nagamochi-Ibaraki algorithm. The algorithm separates the graph's
+ /// nodes into two partitions with the minimum sum of edge capacities
+ /// between the two partitions. The algorithm can be used to test
+ /// the network reliability, especially to test how many links have
+ /// to be destroyed in the network to split it to at least two
+ /// distinct subnetworks.
+ ///
+ /// The complexity of the algorithm is \f$ O(nm\log(n)) \f$ but with
+ /// \ref FibHeap "Fibonacci heap" it can be decreased to
+ /// \f$ O(nm+n^2\log(n)) \f$. When the edges have unit capacities,
+ /// \c BucketHeap can be used which yields \f$ O(nm) \f$ time
+ /// complexity.
+ ///
+ /// \warning The value type of the capacity map should be able to
+ /// hold any cut value of the graph, otherwise the result can
+ /// overflow.
+ /// \note This capacity is supposed to be integer type.
+#ifdef DOXYGEN
+ template <typename GR, typename CM, typename TR>
+#else
+ template <typename GR, typename CM = typename GR::template EdgeMap<int>,
+ typename TR = NagamochiIbarakiDefaultTraits<GR, CM> >
+#endif
+ class NagamochiIbaraki {
+ public:
+
+ typedef TR Traits;
+ /// The type of the underlying graph.
+ typedef typename Traits::Graph Graph;
+
+ /// The type of the capacity map.
+ typedef typename Traits::CapacityMap CapacityMap;
+ /// The value type of the capacity map.
+ typedef typename Traits::CapacityMap::Value Value;
+
+ /// The heap type used by the algorithm.
+ typedef typename Traits::Heap Heap;
+ /// The cross reference type used for the heap.
+ typedef typename Traits::HeapCrossRef HeapCrossRef;
+
+ ///\name Named template parameters
+
+ ///@{
+
+ struct SetUnitCapacityTraits : public Traits {
+ typedef ConstMap<typename Graph::Edge, Const<int, 1> > CapacityMap;
+ static CapacityMap *createCapacityMap(const Graph&) {
+ return new CapacityMap();
+ }
+ };
+
+ /// \brief \ref named-templ-param "Named parameter" for setting
+ /// the capacity map to a constMap() instance
+ ///
+ /// \ref named-templ-param "Named parameter" for setting
+ /// the capacity map to a constMap() instance
+ struct SetUnitCapacity
+ : public NagamochiIbaraki {
+ typedef NagamochiIbaraki Create;
+ };
+
+
+ template <typename H, typename CR>
+ struct SetHeapTraits : public Traits {
+ typedef CR HeapCrossRef;
+ typedef H Heap;
+ static HeapCrossRef *createHeapCrossRef(int num) {
+ LEMON_ASSERT(false, "HeapCrossRef is not initialized");
+ return 0; // ignore warnings
+ }
+ static Heap *createHeap(HeapCrossRef &) {
+ LEMON_ASSERT(false, "Heap is not initialized");
+ return 0; // ignore warnings
+ }
+ };
+
+ /// \brief \ref named-templ-param "Named parameter" for setting
+ /// heap and cross reference type
+ ///
+ /// \ref named-templ-param "Named parameter" for setting heap and
+ /// cross reference type. The heap has to maximize the priorities.
+ template <typename H, typename CR = typename Graph::template NodeMap<int> >
+ struct SetHeap
+ : public NagamochiIbaraki<Graph, CapacityMap, SetHeapTraits<H, CR> > {
+ typedef NagamochiIbaraki< Graph, CapacityMap, SetHeapTraits<H, CR> >
+ Create;
+ };
+
+ template <typename H, typename CR>
+ struct SetStandardHeapTraits : public Traits {
+ typedef CR HeapCrossRef;
+ typedef H Heap;
+ static HeapCrossRef *createHeapCrossRef(int size) {
+ return new HeapCrossRef(size);
+ }
+ static Heap *createHeap(HeapCrossRef &crossref) {
+ return new Heap(crossref);
+ }
+ };
+
+ /// \brief \ref named-templ-param "Named parameter" for setting
+ /// heap and cross reference type with automatic allocation
+ ///
+ /// \ref named-templ-param "Named parameter" for setting heap and
+ /// cross reference type with automatic allocation. They should
+ /// have standard constructor interfaces to be able to
+ /// automatically created by the algorithm (i.e. the graph should
+ /// be passed to the constructor of the cross reference and the
+ /// cross reference should be passed to the constructor of the
+ /// heap). However, external heap and cross reference objects
+ /// could also be passed to the algorithm using the \ref heap()
+ /// function before calling \ref run() or \ref init(). The heap
+ /// has to maximize the priorities.
+ /// \sa SetHeap
+ template <typename H, typename CR = typename Graph::template NodeMap<int> >
+ struct SetStandardHeap
+ : public NagamochiIbaraki<Graph, CapacityMap, SetStandardHeapTraits<H, CR> > {
+ typedef NagamochiIbaraki<Graph, CapacityMap, SetStandardHeapTraits<H, CR> > Create;
+ };
+
+ ///@}
+
+
+ private:
+
+ const Graph &_graph;
+ const CapacityMap *_capacity;
+ bool _local_capacity; // unit capacity
+
+ struct ArcData {
+ typename Graph::Node target;
+ int prev, next;
+ };
+ struct EdgeData {
+ Value capacity;
+ Value cut;
+ };
+
+ struct NodeData {
+ int first_arc;
+ typename Graph::Node prev, next;
+ int curr_arc;
+ typename Graph::Node last_rep;
+ Value sum;
+ };
+
+ typename Graph::template NodeMap<NodeData> *_nodes;
+ std::vector<ArcData> _arcs;
+ std::vector<EdgeData> _edges;
+
+ typename Graph::Node _first_node;
+ int _node_num;
+
+ Value _min_cut;
+
+ HeapCrossRef *_heap_cross_ref;
+ bool _local_heap_cross_ref;
+ Heap *_heap;
+ bool _local_heap;
+
+ typedef typename Graph::template NodeMap<typename Graph::Node> NodeList;
+ NodeList *_next_rep;
+
+ typedef typename Graph::template NodeMap<bool> MinCutMap;
+ MinCutMap *_cut_map;
+
+ void createStructures() {
+ if (!_nodes) {
+ _nodes = new (typename Graph::template NodeMap<NodeData>)(_graph);
+ }
+ if (!_capacity) {
+ _local_capacity = true;
+ _capacity = Traits::createCapacityMap(_graph);
+ }
+ if (!_heap_cross_ref) {
+ _local_heap_cross_ref = true;
+ _heap_cross_ref = Traits::createHeapCrossRef(_graph);
+ }
+ if (!_heap) {
+ _local_heap = true;
+ _heap = Traits::createHeap(*_heap_cross_ref);
+ }
+ if (!_next_rep) {
+ _next_rep = new NodeList(_graph);
+ }
+ if (!_cut_map) {
+ _cut_map = new MinCutMap(_graph);
+ }
+ }
+
+ public :
+
+ typedef NagamochiIbaraki Create;
+
+
+ /// \brief Constructor.
+ ///
+ /// \param graph The graph the algorithm runs on.
+ /// \param capacity The capacity map used by the algorithm.
+ NagamochiIbaraki(const Graph& graph, const CapacityMap& capacity)
+ : _graph(graph), _capacity(&capacity), _local_capacity(false),
+ _nodes(0), _arcs(), _edges(), _min_cut(),
+ _heap_cross_ref(0), _local_heap_cross_ref(false),
+ _heap(0), _local_heap(false),
+ _next_rep(0), _cut_map(0) {}
+
+ /// \brief Constructor.
+ ///
+ /// This constructor can be used only when the Traits class
+ /// defines how can the local capacity map be instantiated.
+ /// If \ref SetUnitCapacity is used, the algorithm automatically
+ /// constructs the capacity map.
+ ///
+ ///\param graph The graph the algorithm runs on.
+ NagamochiIbaraki(const Graph& graph)
+ : _graph(graph), _capacity(0), _local_capacity(false),
+ _nodes(0), _arcs(), _edges(), _min_cut(),
+ _heap_cross_ref(0), _local_heap_cross_ref(false),
+ _heap(0), _local_heap(false),
+ _next_rep(0), _cut_map(0) {}
+
+ /// \brief Destructor.
+ ///
+ /// Destructor.
+ ~NagamochiIbaraki() {
+ if (_local_capacity) delete _capacity;
+ if (_nodes) delete _nodes;
+ if (_local_heap) delete _heap;
+ if (_local_heap_cross_ref) delete _heap_cross_ref;
+ if (_next_rep) delete _next_rep;
+ if (_cut_map) delete _cut_map;
+ }
+
+ /// \brief Sets the heap and the cross reference used by algorithm.
+ ///
+ /// Sets the heap and the cross reference used by algorithm.
+ /// If you don't use this function before calling \ref run(),
+ /// it will allocate one. The destructor deallocates this
+ /// automatically allocated heap and cross reference, of course.
+ /// \return (*this)
+ NagamochiIbaraki &heap(Heap& hp, HeapCrossRef &cr)
+ {
+ if (_local_heap_cross_ref) {
+ delete _heap_cross_ref;
+ _local_heap_cross_ref = false;
+ }
+ _heap_cross_ref = &cr;
+ if (_local_heap) {
+ delete _heap;
+ _local_heap = false;
+ }
+ _heap = &hp;
+ return *this;
+ }
+
+ /// \name Execution control
+ /// The simplest way to execute the algorithm is to use
+ /// one of the member functions called \c run().
+ /// \n
+ /// If you need more control on the execution,
+ /// first you must call \ref init() and then call the start()
+ /// or proper times the processNextPhase() member functions.
+
+ ///@{
+
+ /// \brief Initializes the internal data structures.
+ ///
+ /// Initializes the internal data structures.
+ void init() {
+ createStructures();
+
+ int edge_num = countEdges(_graph);
+ _edges.resize(edge_num);
+ _arcs.resize(2 * edge_num);
+
+ typename Graph::Node prev = INVALID;
+ _node_num = 0;
+ for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+ (*_cut_map)[n] = false;
+ (*_next_rep)[n] = INVALID;
+ (*_nodes)[n].last_rep = n;
+ (*_nodes)[n].first_arc = -1;
+ (*_nodes)[n].curr_arc = -1;
+ (*_nodes)[n].prev = prev;
+ if (prev != INVALID) {
+ (*_nodes)[prev].next = n;
+ }
+ (*_nodes)[n].next = INVALID;
+ (*_nodes)[n].sum = 0;
+ prev = n;
+ ++_node_num;
+ }
+
+ _first_node = typename Graph::NodeIt(_graph);
+
+ int index = 0;
+ for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+ for (typename Graph::OutArcIt a(_graph, n); a != INVALID; ++a) {
+ typename Graph::Node m = _graph.target(a);
+
+ if (!(n < m)) continue;
+
+ (*_nodes)[n].sum += (*_capacity)[a];
+ (*_nodes)[m].sum += (*_capacity)[a];
+
+ int c = (*_nodes)[m].curr_arc;
+ if (c != -1 && _arcs[c ^ 1].target == n) {
+ _edges[c >> 1].capacity += (*_capacity)[a];
+ } else {
+ _edges[index].capacity = (*_capacity)[a];
+
+ _arcs[index << 1].prev = -1;
+ if ((*_nodes)[n].first_arc != -1) {
+ _arcs[(*_nodes)[n].first_arc].prev = (index << 1);
+ }
+ _arcs[index << 1].next = (*_nodes)[n].first_arc;
+ (*_nodes)[n].first_arc = (index << 1);
+ _arcs[index << 1].target = m;
+
+ (*_nodes)[m].curr_arc = (index << 1);
+
+ _arcs[(index << 1) | 1].prev = -1;
+ if ((*_nodes)[m].first_arc != -1) {
+ _arcs[(*_nodes)[m].first_arc].prev = ((index << 1) | 1);
+ }
+ _arcs[(index << 1) | 1].next = (*_nodes)[m].first_arc;
+ (*_nodes)[m].first_arc = ((index << 1) | 1);
+ _arcs[(index << 1) | 1].target = n;
+
+ ++index;
+ }
+ }
+ }
+
+ typename Graph::Node cut_node = INVALID;
+ _min_cut = std::numeric_limits::max();
+
+ for (typename Graph::Node n = _first_node;
+ n != INVALID; n = (*_nodes)[n].next) {
+ if ((*_nodes)[n].sum < _min_cut) {
+ cut_node = n;
+ _min_cut = (*_nodes)[n].sum;
+ }
+ }
+ (*_cut_map)[cut_node] = true;
+ if (_min_cut == 0) {
+ _first_node = INVALID;
+ }
+ }
+
+ public:
+
+ /// \brief Processes the next phase
+ ///
+ /// Processes the next phase in the algorithm. It must be called
+ /// at most one less than the number of nodes in the graph.
+ ///
+ ///\return %True when the algorithm finished.
+ bool processNextPhase() {
+ if (_first_node == INVALID) return true;
+
+ _heap->clear();
+ for (typename Graph::Node n = _first_node;
+ n != INVALID; n = (*_nodes)[n].next) {
+ (*_heap_cross_ref)[n] = Heap::PRE_HEAP;
+ }
+
+ std::vector order;
+ order.reserve(_node_num);
+ int sep = 0;
+
+ Value alpha = 0;
+ Value pmc = std::numeric_limits::max();
+
+ _heap->push(_first_node, static_cast(0));
+ while (!_heap->empty()) {
+ typename Graph::Node n = _heap->top();
+ Value v = _heap->prio();
+
+ _heap->pop();
+ for (int a = (*_nodes)[n].first_arc; a != -1; a = _arcs[a].next) {
+ switch (_heap->state(_arcs[a].target)) {
+ case Heap::PRE_HEAP:
+ {
+ Value nv = _edges[a >> 1].capacity;
+ _heap->push(_arcs[a].target, nv);
+ _edges[a >> 1].cut = nv;
+ } break;
+ case Heap::IN_HEAP:
+ {
+ Value nv = _edges[a >> 1].capacity + (*_heap)[_arcs[a].target];
+ _heap->decrease(_arcs[a].target, nv);
+ _edges[a >> 1].cut = nv;
+ } break;
+ case Heap::POST_HEAP:
+ break;
+ }
+ }
+
+ alpha += (*_nodes)[n].sum;
+ alpha -= 2 * v;
+
+ order.push_back(n);
+ if (!_heap->empty()) {
+ if (alpha < pmc) {
+ pmc = alpha;
+ sep = order.size();
+ }
+ }
+ }
+
+ if (static_cast(order.size()) < _node_num) {
+ _first_node = INVALID;
+ for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+ (*_cut_map)[n] = false;
+ }
+ for (int i = 0; i < static_cast(order.size()); ++i) {
+ typename Graph::Node n = order[i];
+ while (n != INVALID) {
+ (*_cut_map)[n] = true;
+ n = (*_next_rep)[n];
+ }
+ }
+ _min_cut = 0;
+ return true;
+ }
+
+ if (pmc < _min_cut) {
+ for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+ (*_cut_map)[n] = false;
+ }
+ for (int i = 0; i < sep; ++i) {
+ typename Graph::Node n = order[i];
+ while (n != INVALID) {
+ (*_cut_map)[n] = true;
+ n = (*_next_rep)[n];
+ }
+ }
+ _min_cut = pmc;
+ }
+
+ for (typename Graph::Node n = _first_node;
+ n != INVALID; n = (*_nodes)[n].next) {
+ bool merged = false;
+ for (int a = (*_nodes)[n].first_arc; a != -1; a = _arcs[a].next) {
+ if (!(_edges[a >> 1].cut < pmc)) {
+ if (!merged) {
+ for (int b = (*_nodes)[n].first_arc; b != -1; b = _arcs[b].next) {
+ (*_nodes)[_arcs[b].target].curr_arc = b;
+ }
+ merged = true;
+ }
+ typename Graph::Node m = _arcs[a].target;
+ int nb = 0;
+ for (int b = (*_nodes)[m].first_arc; b != -1; b = nb) {
+ nb = _arcs[b].next;
+ if ((b ^ a) == 1) continue;
+ typename Graph::Node o = _arcs[b].target;
+ int c = (*_nodes)[o].curr_arc;
+ if (c != -1 && _arcs[c ^ 1].target == n) {
+ _edges[c >> 1].capacity += _edges[b >> 1].capacity;
+ (*_nodes)[n].sum += _edges[b >> 1].capacity;
+ if (_edges[b >> 1].cut < _edges[c >> 1].cut) {
+ _edges[b >> 1].cut = _edges[c >> 1].cut;
+ }
+ if (_arcs[b ^ 1].prev != -1) {
+ _arcs[_arcs[b ^ 1].prev].next = _arcs[b ^ 1].next;
+ } else {
+ (*_nodes)[o].first_arc = _arcs[b ^ 1].next;
+ }
+ if (_arcs[b ^ 1].next != -1) {
+ _arcs[_arcs[b ^ 1].next].prev = _arcs[b ^ 1].prev;
+ }
+ } else {
+ if (_arcs[a].next != -1) {
+ _arcs[_arcs[a].next].prev = b;
+ }
+ _arcs[b].next = _arcs[a].next;
+ _arcs[b].prev = a;
+ _arcs[a].next = b;
+ _arcs[b ^ 1].target = n;
+
+ (*_nodes)[n].sum += _edges[b >> 1].capacity;
+ (*_nodes)[o].curr_arc = b;
+ }
+ }
+
+ if (_arcs[a].prev != -1) {
+ _arcs[_arcs[a].prev].next = _arcs[a].next;
+ } else {
+ (*_nodes)[n].first_arc = _arcs[a].next;
+ }
+ if (_arcs[a].next != -1) {
+ _arcs[_arcs[a].next].prev = _arcs[a].prev;
+ }
+
+ (*_nodes)[n].sum -= _edges[a >> 1].capacity;
+ (*_next_rep)[(*_nodes)[n].last_rep] = m;
+ (*_nodes)[n].last_rep = (*_nodes)[m].last_rep;
+
+ if ((*_nodes)[m].prev != INVALID) {
+ (*_nodes)[(*_nodes)[m].prev].next = (*_nodes)[m].next;
+ } else{
+ _first_node = (*_nodes)[m].next;
+ }
+ if ((*_nodes)[m].next != INVALID) {
+ (*_nodes)[(*_nodes)[m].next].prev = (*_nodes)[m].prev;
+ }
+ --_node_num;
+ }
+ }
+ }
+
+ if (_node_num == 1) {
+ _first_node = INVALID;
+ return true;
+ }
+
+ return false;
+ }
+
+ /// \brief Executes the algorithm.
+ ///
+ /// Executes the algorithm.
+ ///
+ /// \pre init() must be called
+ void start() {
+ while (!processNextPhase()) {}
+ }
+
+
+ /// \brief Runs %NagamochiIbaraki algorithm.
+ ///
+ /// This method runs the %Min cut algorithm
+ ///
+ /// \note mc.run() is just a shortcut of the following code.
+ ///\code
+ /// mc.init();
+ /// mc.start();
+ ///\endcode
+ void run() {
+ init();
+ start();
+ }
+
+ ///@}
+
+ /// \name Query Functions
+ ///
+ /// The result of the %NagamochiIbaraki
+ /// algorithm can be obtained using these functions.\n
+ /// Before the use of these functions, either run() or start()
+ /// must be called.
+
+ ///@{
+
+ /// \brief Returns the min cut value.
+ ///
+ /// Returns the min cut value if the algorithm finished.
+ /// After the first processNextPhase() it is a value of a
+ /// valid cut in the graph.
+ Value minCutValue() const {
+ return _min_cut;
+ }
+
+ /// \brief Returns a min cut in a NodeMap.
+ ///
+ /// It sets the nodes of one of the two partitions to true and
+ /// the other partition to false.
+ /// \param cutMap A \ref concepts::WriteMap "writable" node map with
+ /// \c bool (or convertible) value type.
+ template
+ Value minCutMap(CutMap& cutMap) const {
+ for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+ cutMap.set(n, (*_cut_map)[n]);
+ }
+ return minCutValue();
+ }
+
+ ///@}
+
+ };
+}
+
+#endif
Index: lemon/network_simplex.h
===================================================================
--- lemon/network_simplex.h (revision 978)
+++ lemon/network_simplex.h (revision 1026)
@@ -48,8 +48,8 @@
/// flow problem.
///
- /// In general, %NetworkSimplex is the fastest implementation available
- /// in LEMON for this problem.
- /// Moreover, it supports both directions of the supply/demand inequality
- /// constraints. For more information, see \ref SupplyType.
+ /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
+ /// implementations available in LEMON for this problem.
+ /// Furthermore, this class supports both directions of the supply/demand
+ /// inequality constraints. For more information, see \ref SupplyType.
///
/// Most of the parameters of the problem (except for the digraph)
@@ -64,5 +64,6 @@
/// algorithm. By default, it is the same as \c V.
///
- /// \warning Both number types must be signed and all input data must
+ /// \warning Both \c V and \c C must be signed number types.
+ /// \warning All input data (capacities, supply values, and costs) must
/// be integer.
///
@@ -126,5 +127,5 @@
/// of the algorithm.
/// By default, \ref BLOCK_SEARCH "Block Search" is used, which
- /// proved to be the most efficient and the most robust on various
+ /// turned out to be the most efficient and the most robust on various
/// test inputs.
/// However, another pivot rule can be selected using the \ref run()
@@ -167,6 +168,7 @@
typedef std::vector ValueVector;
typedef std::vector CostVector;
- typedef std::vector BoolVector;
- // Note: vector is used instead of vector for efficiency reasons
+ typedef std::vector CharVector;
+ // Note: vector is used instead of vector and
+ // vector for efficiency reasons
// State constants for arcs
@@ -177,7 +179,9 @@
};
- typedef std::vector StateVector;
- // Note: vector is used instead of vector for
- // efficiency reasons
+ // Direction constants for tree arcs
+ enum ArcDirection {
+ DIR_DOWN = -1,
+ DIR_UP = 1
+ };
private:
@@ -218,13 +222,11 @@
IntVector _succ_num;
IntVector _last_succ;
+ CharVector _pred_dir;
+ CharVector _state;
IntVector _dirty_revs;
- BoolVector _forward;
- StateVector _state;
int _root;
// Temporary data used in the current pivot iteration
int in_arc, join, u_in, v_in, u_out, v_out;
- int first, second, right, last;
- int stem, par_stem, new_stem;
Value delta;
@@ -251,5 +253,5 @@
const IntVector &_target;
const CostVector &_cost;
- const StateVector &_state;
+ const CharVector &_state;
const CostVector &_pi;
int &_in_arc;
@@ -303,5 +305,5 @@
const IntVector &_target;
const CostVector &_cost;
- const StateVector &_state;
+ const CharVector &_state;
const CostVector &_pi;
int &_in_arc;
@@ -342,5 +344,5 @@
const IntVector &_target;
const CostVector &_cost;
- const StateVector &_state;
+ const CharVector &_state;
const CostVector &_pi;
int &_in_arc;
@@ -415,5 +417,5 @@
const IntVector &_target;
const CostVector &_cost;
- const StateVector &_state;
+ const CharVector &_state;
const CostVector &_pi;
int &_in_arc;
@@ -518,5 +520,5 @@
const IntVector &_target;
const CostVector &_cost;
- const StateVector &_state;
+ const CharVector &_state;
const CostVector &_pi;
int &_in_arc;
@@ -571,9 +573,11 @@
// Check the current candidate list
int e;
+ Cost c;
for (int i = 0; i != _curr_length; ++i) {
e = _candidates[i];
- _cand_cost[e] = _state[e] *
- (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
- if (_cand_cost[e] >= 0) {
+ c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
+ if (c < 0) {
+ _cand_cost[e] = c;
+ } else {
_candidates[i--] = _candidates[--_curr_length];
}
@@ -585,7 +589,7 @@
for (e = _next_arc; e != _search_arc_num; ++e) {
- _cand_cost[e] = _state[e] *
- (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
- if (_cand_cost[e] < 0) {
+ c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
+ if (c < 0) {
+ _cand_cost[e] = c;
_candidates[_curr_length++] = e;
}
@@ -634,9 +638,10 @@
///
/// \param graph The digraph the algorithm runs on.
- /// \param arc_mixing Indicate if the arcs have to be stored in a
+ /// \param arc_mixing Indicate if the arcs will be stored in a
/// mixed order in the internal data structure.
- /// In special cases, it could lead to better overall performance,
- /// but it is usually slower. Therefore it is disabled by default.
- NetworkSimplex(const GR& graph, bool arc_mixing = false) :
+ /// In general, it leads to similar performance as using the original
+ /// arc order, but it makes the algorithm more robust and in special
+ /// cases, even significantly faster. Therefore, it is enabled by default.
+ NetworkSimplex(const GR& graph, bool arc_mixing = true) :
_graph(graph), _node_id(graph), _arc_id(graph),
_arc_mixing(arc_mixing),
@@ -731,4 +736,6 @@
///
/// \return (*this)
+ ///
+ /// \sa supplyType()
template
NetworkSimplex& supplyMap(const SupplyMap& map) {
@@ -747,5 +754,5 @@
///
/// Using this function has the same effect as using \ref supplyMap()
- /// with such a map in which \c k is assigned to \c s, \c -k is
+ /// with a map in which \c k is assigned to \c s, \c -k is
/// assigned to \c t and all other nodes have zero supply value.
///
@@ -914,5 +921,5 @@
_parent.resize(all_node_num);
_pred.resize(all_node_num);
- _forward.resize(all_node_num);
+ _pred_dir.resize(all_node_num);
_thread.resize(all_node_num);
_rev_thread.resize(all_node_num);
@@ -928,5 +935,5 @@
if (_arc_mixing) {
// Store the arcs in a mixed order
- int k = std::max(int(std::sqrt(double(_arc_num))), 10);
+ const int skip = std::max(_arc_num / _node_num, 3);
int i = 0, j = 0;
for (ArcIt a(_graph); a != INVALID; ++a) {
@@ -934,5 +941,5 @@
_source[i] = _node_id[_graph.source(a)];
_target[i] = _node_id[_graph.target(a)];
- if ((i += k) >= _arc_num) i = ++j;
+ if ((i += skip) >= _arc_num) i = ++j;
}
} else {
@@ -1117,5 +1124,5 @@
_state[e] = STATE_TREE;
if (_supply[u] >= 0) {
- _forward[u] = true;
+ _pred_dir[u] = DIR_UP;
_pi[u] = 0;
_source[e] = u;
@@ -1124,5 +1131,5 @@
_cost[e] = 0;
} else {
- _forward[u] = false;
+ _pred_dir[u] = DIR_DOWN;
_pi[u] = ART_COST;
_source[e] = _root;
@@ -1144,5 +1151,5 @@
_last_succ[u] = u;
if (_supply[u] >= 0) {
- _forward[u] = true;
+ _pred_dir[u] = DIR_UP;
_pi[u] = 0;
_pred[u] = e;
@@ -1154,5 +1161,5 @@
_state[e] = STATE_TREE;
} else {
- _forward[u] = false;
+ _pred_dir[u] = DIR_DOWN;
_pi[u] = ART_COST;
_pred[u] = f;
@@ -1185,5 +1192,5 @@
_last_succ[u] = u;
if (_supply[u] <= 0) {
- _forward[u] = false;
+ _pred_dir[u] = DIR_DOWN;
_pi[u] = 0;
_pred[u] = e;
@@ -1195,5 +1202,5 @@
_state[e] = STATE_TREE;
} else {
- _forward[u] = true;
+ _pred_dir[u] = DIR_UP;
_pi[u] = -ART_COST;
_pred[u] = f;
@@ -1238,4 +1245,5 @@
// Initialize first and second nodes according to the direction
// of the cycle
+ int first, second;
if (_state[in_arc] == STATE_LOWER) {
first = _source[in_arc];
@@ -1247,12 +1255,15 @@
delta = _cap[in_arc];
int result = 0;
- Value d;
+ Value c, d;
int e;
- // Search the cycle along the path form the first node to the root
+ // Search the cycle from the first node to the join node
for (int u = first; u != join; u = _parent[u]) {
e = _pred[u];
- d = _forward[u] ?
- _flow[e] : (_cap[e] >= MAX ? INF : _cap[e] - _flow[e]);
+ d = _flow[e];
+ if (_pred_dir[u] == DIR_DOWN) {
+ c = _cap[e];
+ d = c >= MAX ? INF : c - d;
+ }
if (d < delta) {
delta = d;
@@ -1261,9 +1272,13 @@
}
}
- // Search the cycle along the path form the second node to the root
+
+ // Search the cycle from the second node to the join node
for (int u = second; u != join; u = _parent[u]) {
e = _pred[u];
- d = _forward[u] ?
- (_cap[e] >= MAX ? INF : _cap[e] - _flow[e]) : _flow[e];
+ d = _flow[e];
+ if (_pred_dir[u] == DIR_UP) {
+ c = _cap[e];
+ d = c >= MAX ? INF : c - d;
+ }
if (d <= delta) {
delta = d;
@@ -1290,8 +1305,8 @@
_flow[in_arc] += val;
for (int u = _source[in_arc]; u != join; u = _parent[u]) {
- _flow[_pred[u]] += _forward[u] ? -val : val;
+ _flow[_pred[u]] -= _pred_dir[u] * val;
}
for (int u = _target[in_arc]; u != join; u = _parent[u]) {
- _flow[_pred[u]] += _forward[u] ? val : -val;
+ _flow[_pred[u]] += _pred_dir[u] * val;
}
}
@@ -1308,5 +1323,4 @@
// Update the tree structure
void updateTreeStructure() {
- int u, w;
int old_rev_thread = _rev_thread[u_out];
int old_succ_num = _succ_num[u_out];
@@ -1314,122 +1328,127 @@
v_out = _parent[u_out];
- u = _last_succ[u_in]; // the last successor of u_in
- right = _thread[u]; // the node after it
-
- // Handle the case when old_rev_thread equals to v_in
- // (it also means that join and v_out coincide)
- if (old_rev_thread == v_in) {
- last = _thread[_last_succ[u_out]];
+ // Check if u_in and u_out coincide
+ if (u_in == u_out) {
+ // Update _parent, _pred, _pred_dir
+ _parent[u_in] = v_in;
+ _pred[u_in] = in_arc;
+ _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN;
+
+ // Update _thread and _rev_thread
+ if (_thread[v_in] != u_out) {
+ int after = _thread[old_last_succ];
+ _thread[old_rev_thread] = after;
+ _rev_thread[after] = old_rev_thread;
+ after = _thread[v_in];
+ _thread[v_in] = u_out;
+ _rev_thread[u_out] = v_in;
+ _thread[old_last_succ] = after;
+ _rev_thread[after] = old_last_succ;
+ }
} else {
- last = _thread[v_in];
- }
-
- // Update _thread and _parent along the stem nodes (i.e. the nodes
- // between u_in and u_out, whose parent have to be changed)
- _thread[v_in] = stem = u_in;
- _dirty_revs.clear();
- _dirty_revs.push_back(v_in);
- par_stem = v_in;
- while (stem != u_out) {
- // Insert the next stem node into the thread list
- new_stem = _parent[stem];
- _thread[u] = new_stem;
- _dirty_revs.push_back(u);
-
- // Remove the subtree of stem from the thread list
- w = _rev_thread[stem];
- _thread[w] = right;
- _rev_thread[right] = w;
-
- // Change the parent node and shift stem nodes
- _parent[stem] = par_stem;
- par_stem = stem;
- stem = new_stem;
-
- // Update u and right
- u = _last_succ[stem] == _last_succ[par_stem] ?
- _rev_thread[par_stem] : _last_succ[stem];
- right = _thread[u];
- }
- _parent[u_out] = par_stem;
- _thread[u] = last;
- _rev_thread[last] = u;
- _last_succ[u_out] = u;
-
- // Remove the subtree of u_out from the thread list except for
- // the case when old_rev_thread equals to v_in
- // (it also means that join and v_out coincide)
- if (old_rev_thread != v_in) {
- _thread[old_rev_thread] = right;
- _rev_thread[right] = old_rev_thread;
- }
-
- // Update _rev_thread using the new _thread values
- for (int i = 0; i != int(_dirty_revs.size()); ++i) {
- u = _dirty_revs[i];
- _rev_thread[_thread[u]] = u;
- }
-
- // Update _pred, _forward, _last_succ and _succ_num for the
- // stem nodes from u_out to u_in
- int tmp_sc = 0, tmp_ls = _last_succ[u_out];
- u = u_out;
- while (u != u_in) {
- w = _parent[u];
- _pred[u] = _pred[w];
- _forward[u] = !_forward[w];
- tmp_sc += _succ_num[u] - _succ_num[w];
- _succ_num[u] = tmp_sc;
- _last_succ[w] = tmp_ls;
- u = w;
- }
- _pred[u_in] = in_arc;
- _forward[u_in] = (u_in == _source[in_arc]);
- _succ_num[u_in] = old_succ_num;
-
- // Set limits for updating _last_succ form v_in and v_out
- // towards the root
- int up_limit_in = -1;
- int up_limit_out = -1;
- if (_last_succ[join] == v_in) {
- up_limit_out = join;
- } else {
- up_limit_in = join;
+ // Handle the case when old_rev_thread equals to v_in
+ // (it also means that join and v_out coincide)
+ int thread_continue = old_rev_thread == v_in ?
+ _thread[old_last_succ] : _thread[v_in];
+
+ // Update _thread and _parent along the stem nodes (i.e. the nodes
+ // between u_in and u_out, whose parent have to be changed)
+ int stem = u_in; // the current stem node
+ int par_stem = v_in; // the new parent of stem
+ int next_stem; // the next stem node
+ int last = _last_succ[u_in]; // the last successor of stem
+ int before, after = _thread[last];
+ _thread[v_in] = u_in;
+ _dirty_revs.clear();
+ _dirty_revs.push_back(v_in);
+ while (stem != u_out) {
+ // Insert the next stem node into the thread list
+ next_stem = _parent[stem];
+ _thread[last] = next_stem;
+ _dirty_revs.push_back(last);
+
+ // Remove the subtree of stem from the thread list
+ before = _rev_thread[stem];
+ _thread[before] = after;
+ _rev_thread[after] = before;
+
+ // Change the parent node and shift stem nodes
+ _parent[stem] = par_stem;
+ par_stem = stem;
+ stem = next_stem;
+
+ // Update last and after
+ last = _last_succ[stem] == _last_succ[par_stem] ?
+ _rev_thread[par_stem] : _last_succ[stem];
+ after = _thread[last];
+ }
+ _parent[u_out] = par_stem;
+ _thread[last] = thread_continue;
+ _rev_thread[thread_continue] = last;
+ _last_succ[u_out] = last;
+
+ // Remove the subtree of u_out from the thread list except for
+ // the case when old_rev_thread equals to v_in
+ if (old_rev_thread != v_in) {
+ _thread[old_rev_thread] = after;
+ _rev_thread[after] = old_rev_thread;
+ }
+
+ // Update _rev_thread using the new _thread values
+ for (int i = 0; i != int(_dirty_revs.size()); ++i) {
+ int u = _dirty_revs[i];
+ _rev_thread[_thread[u]] = u;
+ }
+
+ // Update _pred, _pred_dir, _last_succ and _succ_num for the
+ // stem nodes from u_out to u_in
+ int tmp_sc = 0, tmp_ls = _last_succ[u_out];
+ for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) {
+ _pred[u] = _pred[p];
+ _pred_dir[u] = -_pred_dir[p];
+ tmp_sc += _succ_num[u] - _succ_num[p];
+ _succ_num[u] = tmp_sc;
+ _last_succ[p] = tmp_ls;
+ }
+ _pred[u_in] = in_arc;
+ _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN;
+ _succ_num[u_in] = old_succ_num;
}
// Update _last_succ from v_in towards the root
- for (u = v_in; u != up_limit_in && _last_succ[u] == v_in;
- u = _parent[u]) {
- _last_succ[u] = _last_succ[u_out];
- }
+ int up_limit_out = _last_succ[join] == v_in ? join : -1;
+ int last_succ_out = _last_succ[u_out];
+ for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) {
+ _last_succ[u] = last_succ_out;
+ }
+
// Update _last_succ from v_out towards the root
if (join != old_rev_thread && v_in != old_rev_thread) {
- for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
+ for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
u = _parent[u]) {
_last_succ[u] = old_rev_thread;
}
- } else {
- for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
+ }
+ else if (last_succ_out != old_last_succ) {
+ for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
u = _parent[u]) {
- _last_succ[u] = _last_succ[u_out];
+ _last_succ[u] = last_succ_out;
}
}
// Update _succ_num from v_in to join
- for (u = v_in; u != join; u = _parent[u]) {
+ for (int u = v_in; u != join; u = _parent[u]) {
_succ_num[u] += old_succ_num;
}
// Update _succ_num from v_out to join
- for (u = v_out; u != join; u = _parent[u]) {
+ for (int u = v_out; u != join; u = _parent[u]) {
_succ_num[u] -= old_succ_num;
}
}
- // Update potentials
+ // Update potentials in the subtree that has been moved
void updatePotential() {
- Cost sigma = _forward[u_in] ?
- _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] :
- _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]];
- // Update potentials in the subtree, which has been moved
+ Cost sigma = _pi[v_in] - _pi[u_in] -
+ _pred_dir[u_in] * _cost[in_arc];
int end = _thread[_last_succ[u_in]];
for (int u = u_in; u != end; u = _thread[u]) {
Index: lemon/path.h
===================================================================
--- lemon/path.h (revision 956)
+++ lemon/path.h (revision 1024)
@@ -44,5 +44,5 @@
///
/// In a sense, the path can be treated as a list of arcs. The
- /// lemon path type stores just this list. As a consequence, it
+ /// LEMON path type stores just this list. As a consequence, it
/// cannot enumerate the nodes of the path and the source node of
/// a zero length path is undefined.
@@ -136,5 +136,5 @@
void clear() { head.clear(); tail.clear(); }
- /// \brief The nth arc.
+ /// \brief The n-th arc.
///
/// \pre \c n is in the [0..length() - 1] range.
@@ -144,5 +144,5 @@
}
- /// \brief Initialize arc iterator to point to the nth arc
+ /// \brief Initialize arc iterator to point to the n-th arc
///
/// \pre \c n is in the [0..length() - 1] range.
@@ -232,5 +232,5 @@
///
/// In a sense, the path can be treated as a list of arcs. The
- /// lemon path type stores just this list. As a consequence it
+ /// LEMON path type stores just this list. As a consequence it
/// cannot enumerate the nodes in the path and the zero length paths
/// cannot store the source.
@@ -328,5 +328,5 @@
void clear() { data.clear(); }
- /// \brief The nth arc.
+ /// \brief The n-th arc.
///
/// \pre \c n is in the [0..length() - 1] range.
@@ -335,5 +335,5 @@
}
- /// \brief Initializes arc iterator to point to the nth arc.
+ /// \brief Initializes arc iterator to point to the n-th arc.
ArcIt nthIt(int n) const {
return ArcIt(*this, n);
@@ -396,5 +396,5 @@
///
/// In a sense, the path can be treated as a list of arcs. The
- /// lemon path type stores just this list. As a consequence it
+ /// LEMON path type stores just this list. As a consequence it
/// cannot enumerate the nodes in the path and the zero length paths
/// cannot store the source.
@@ -505,7 +505,7 @@
};
- /// \brief The nth arc.
- ///
- /// This function looks for the nth arc in O(n) time.
+ /// \brief The n-th arc.
+ ///
+ /// This function looks for the n-th arc in O(n) time.
/// \pre \c n is in the [0..length() - 1] range.
const Arc& nth(int n) const {
@@ -517,5 +517,5 @@
}
- /// \brief Initializes arc iterator to point to the nth arc.
+ /// \brief Initializes arc iterator to point to the n-th arc.
ArcIt nthIt(int n) const {
Node *node = first;
@@ -736,5 +736,5 @@
///
/// In a sense, the path can be treated as a list of arcs. The
- /// lemon path type stores just this list. As a consequence it
+ /// LEMON path type stores just this list. As a consequence it
/// cannot enumerate the nodes in the path and the source node of
/// a zero length path is undefined.
@@ -832,5 +832,5 @@
};
- /// \brief The nth arc.
+ /// \brief The n-th arc.
///
/// \pre \c n is in the [0..length() - 1] range.
@@ -839,5 +839,5 @@
}
- /// \brief The arc iterator pointing to the nth arc.
+ /// \brief The arc iterator pointing to the n-th arc.
ArcIt nthIt(int n) const {
return ArcIt(*this, n);
@@ -1043,5 +1043,5 @@
///
/// In a sense, the path can be treated as a list of arcs. The
- /// lemon path type stores only this list. As a consequence, it
+ /// LEMON path type stores only this list. As a consequence, it
/// cannot enumerate the nodes in the path and the zero length paths
/// cannot have a source node.
Index: test/CMakeLists.txt
===================================================================
--- test/CMakeLists.txt (revision 1065)
+++ test/CMakeLists.txt (revision 1066)
@@ -36,7 +36,10 @@
maps_test
matching_test
+ max_cardinality_search_test
+ max_clique_test
min_cost_arborescence_test
min_cost_flow_test
min_mean_cycle_test
+ nagamochi_ibaraki_test
path_test
planarity_test
Index: test/Makefile.am
===================================================================
--- test/Makefile.am (revision 953)
+++ test/Makefile.am (revision 1021)
@@ -34,7 +34,10 @@
test/maps_test \
test/matching_test \
+ test/max_cardinality_search_test \
+ test/max_clique_test \
test/min_cost_arborescence_test \
test/min_cost_flow_test \
test/min_mean_cycle_test \
+ test/nagamochi_ibaraki_test \
test/path_test \
test/planarity_test \
@@ -85,7 +88,10 @@
test_mip_test_SOURCES = test/mip_test.cc
test_matching_test_SOURCES = test/matching_test.cc
+test_max_cardinality_search_test_SOURCES = test/max_cardinality_search_test.cc
+test_max_clique_test_SOURCES = test/max_clique_test.cc
test_min_cost_arborescence_test_SOURCES = test/min_cost_arborescence_test.cc
test_min_cost_flow_test_SOURCES = test/min_cost_flow_test.cc
test_min_mean_cycle_test_SOURCES = test/min_mean_cycle_test.cc
+test_nagamochi_ibaraki_test_SOURCES = test/nagamochi_ibaraki_test.cc
test_path_test_SOURCES = test/path_test.cc
test_planarity_test_SOURCES = test/planarity_test.cc
Index: test/graph_copy_test.cc
===================================================================
--- test/graph_copy_test.cc (revision 984)
+++ test/graph_copy_test.cc (revision 989)
@@ -19,4 +19,5 @@
#include
#include
+#include
#include
#include
@@ -27,4 +28,5 @@
using namespace lemon;
+template
void digraph_copy_test() {
const int nn = 10;
@@ -52,17 +54,17 @@
}
}
-
+
// Test digraph copy
- ListDigraph to;
- ListDigraph::NodeMap tnm(to);
- ListDigraph::ArcMap tam(to);
- ListDigraph::Node tn;
- ListDigraph::Arc ta;
-
- SmartDigraph::NodeMap nr(from);
- SmartDigraph::ArcMap er(from);
-
- ListDigraph::NodeMap ncr(to);
- ListDigraph::ArcMap ecr(to);
+ GR to;
+ typename GR::template NodeMap tnm(to);
+ typename GR::template ArcMap tam(to);
+ typename GR::Node tn;
+ typename GR::Arc ta;
+
+ SmartDigraph::NodeMap nr(from);
+ SmartDigraph::ArcMap er(from);
+
+ typename GR::template NodeMap ncr(to);
+ typename GR::template ArcMap ecr(to);
digraphCopy(from, to).
@@ -87,9 +89,9 @@
}
- for (ListDigraph::NodeIt it(to); it != INVALID; ++it) {
+ for (typename GR::NodeIt it(to); it != INVALID; ++it) {
check(nr[ncr[it]] == it, "Wrong copy.");
}
- for (ListDigraph::ArcIt it(to); it != INVALID; ++it) {
+ for (typename GR::ArcIt it(to); it != INVALID; ++it) {
check(er[ecr[it]] == it, "Wrong copy.");
}
@@ -104,4 +106,5 @@
}
+template
void graph_copy_test() {
const int nn = 10;
@@ -136,19 +139,19 @@
// Test graph copy
- ListGraph to;
- ListGraph::NodeMap tnm(to);
- ListGraph::ArcMap tam(to);
- ListGraph::EdgeMap tem(to);
- ListGraph::Node tn;
- ListGraph::Arc ta;
- ListGraph::Edge te;
-
- SmartGraph::NodeMap nr(from);
- SmartGraph::ArcMap ar(from);
- SmartGraph::EdgeMap er(from);
-
- ListGraph::NodeMap ncr(to);
- ListGraph::ArcMap acr(to);
- ListGraph::EdgeMap ecr(to);
+ GR to;
+ typename GR::template NodeMap tnm(to);
+ typename GR::template ArcMap tam(to);
+ typename GR::template EdgeMap tem(to);
+ typename GR::Node tn;
+ typename GR::Arc ta;
+ typename GR::Edge te;
+
+ SmartGraph::NodeMap nr(from);
+ SmartGraph::ArcMap ar(from);
+ SmartGraph::EdgeMap er(from);
+
+ typename GR::template NodeMap ncr(to);
+ typename GR::template ArcMap acr(to);
+ typename GR::template EdgeMap ecr(to);
graphCopy(from, to).
@@ -185,12 +188,12 @@
}
- for (ListGraph::NodeIt it(to); it != INVALID; ++it) {
+ for (typename GR::NodeIt it(to); it != INVALID; ++it) {
check(nr[ncr[it]] == it, "Wrong copy.");
}
- for (ListGraph::ArcIt it(to); it != INVALID; ++it) {
+ for (typename GR::ArcIt it(to); it != INVALID; ++it) {
check(ar[acr[it]] == it, "Wrong copy.");
}
- for (ListGraph::EdgeIt it(to); it != INVALID; ++it) {
+ for (typename GR::EdgeIt it(to); it != INVALID; ++it) {
check(er[ecr[it]] == it, "Wrong copy.");
}
@@ -209,6 +212,9 @@
int main() {
- digraph_copy_test();
- graph_copy_test();
+ digraph_copy_test<SmartDigraph>();
+ digraph_copy_test<ListDigraph>();
+ digraph_copy_test<StaticDigraph>();
+ graph_copy_test<SmartGraph>();
+ graph_copy_test<ListGraph>();
return 0;
Index: test/max_cardinality_search_test.cc
===================================================================
--- test/max_cardinality_search_test.cc (revision 1020)
+++ test/max_cardinality_search_test.cc (revision 1020)
@@ -0,0 +1,162 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#include <iostream>
+
+#include "test_tools.h"
+#include <lemon/smart_graph.h>
+#include <lemon/max_cardinality_search.h>
+#include <lemon/concepts/digraph.h>
+#include <lemon/concepts/maps.h>
+#include <lemon/concepts/heap.h>
+#include <lemon/lgf_reader.h>
+
+using namespace lemon;
+using namespace std;
+
+char test_lgf[] =
+ "@nodes\n"
+ "label\n"
+ "0\n"
+ "1\n"
+ "2\n"
+ "3\n"
+ "@arcs\n"
+ " label capacity\n"
+ "0 1 0 2\n"
+ "1 0 1 2\n"
+ "2 1 2 1\n"
+ "2 3 3 3\n"
+ "3 2 4 3\n"
+ "3 1 5 5\n"
+ "@attributes\n"
+ "s 0\n"
+ "x 1\n"
+ "y 2\n"
+ "z 3\n";
+
+void checkMaxCardSearchCompile() {
+
+ typedef concepts::Digraph Digraph;
+ typedef int Value;
+ typedef Digraph::Node Node;
+ typedef Digraph::Arc Arc;
+ typedef concepts::ReadMap<Arc,Value> CapMap;
+ typedef concepts::ReadWriteMap<Node,Value> CardMap;
+ typedef concepts::ReadWriteMap<Node,bool> ProcMap;
+ typedef Digraph::NodeMap<int> HeapCrossRef;
+
+ Digraph g;
+ Node n,s;
+ CapMap cap;
+ CardMap card;
+ ProcMap proc;
+ HeapCrossRef crossref(g);
+
+ typedef MaxCardinalitySearch<Digraph,CapMap>
+ ::SetCapacityMap<CapMap>
+ ::SetCardinalityMap<CardMap>
+ ::SetProcessedMap<ProcMap>
+ ::SetStandardHeap<BinHeap<Value,HeapCrossRef> >
+ ::Create MaxCardType;
+
+ MaxCardType maxcard(g,cap);
+ const MaxCardType& const_maxcard = maxcard;
+
+ const MaxCardType::Heap& heap_const = const_maxcard.heap();
+ MaxCardType::Heap& heap = const_cast<MaxCardType::Heap&>(heap_const);
+ maxcard.heap(heap,crossref);
+
+ maxcard.capacityMap(cap).cardinalityMap(card).processedMap(proc);
+
+ maxcard.init();
+ maxcard.addSource(s);
+ n = maxcard.nextNode();
+ maxcard.processNextNode();
+ maxcard.start();
+ maxcard.run(s);
+ maxcard.run();
+ }
+
+ void checkWithIntMap( std::istringstream& input)
+ {
+ typedef SmartDigraph Digraph;
+ typedef Digraph::Node Node;
+ typedef Digraph::ArcMap<int> CapMap;
+
+ Digraph g;
+ Node s,x,y,z,a;
+ CapMap cap(g);
+
+ DigraphReader<Digraph>(g,input).
+ arcMap("capacity", cap).
+ node("s",s).
+ node("x",x).
+ node("y",y).
+ node("z",z).
+ run();
+
+ MaxCardinalitySearch<Digraph,CapMap> maxcard(g,cap);
+
+ maxcard.init();
+ maxcard.addSource(s);
+ maxcard.start(x);
+
+ check(maxcard.processed(s) and !maxcard.processed(x) and
+ !maxcard.processed(y), "Wrong processed()!");
+
+ a=maxcard.nextNode();
+ check(maxcard.processNextNode()==a,
+ "Wrong nextNode() or processNextNode() return value!");
+
+ check(maxcard.processed(a), "Wrong processNextNode()!");
+
+ maxcard.start();
+ check(maxcard.cardinality(x)==2 and maxcard.cardinality(y)>=4,
+ "Wrong cardinalities!");
+ }
+
+ void checkWithConst1Map(std::istringstream &input) {
+ typedef SmartDigraph Digraph;
+ typedef Digraph::Node Node;
+
+ Digraph g;
+ Node s,x,y,z;
+
+ DigraphReader<Digraph>(g,input).
+ node("s",s).
+ node("x",x).
+ node("y",y).
+ node("z",z).
+ run();
+
+ MaxCardinalitySearch<Digraph> maxcard(g);
+ maxcard.run(s);
+ check(maxcard.cardinality(x)==1 &&
+ maxcard.cardinality(y)+maxcard.cardinality(z)==3,
+ "Wrong cardinalities!");
+}
+
+int main() {
+
+ std::istringstream input1(test_lgf);
+ checkWithIntMap(input1);
+
+ std::istringstream input2(test_lgf);
+ checkWithConst1Map(input2);
+}
Index: test/max_clique_test.cc
===================================================================
--- test/max_clique_test.cc (revision 1022)
+++ test/max_clique_test.cc (revision 1022)
@@ -0,0 +1,188 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#include <sstream>
+#include <lemon/list_graph.h>
+#include <lemon/full_graph.h>
+#include <lemon/grid_graph.h>
+#include <lemon/lgf_reader.h>
+#include <lemon/grosso_locatelli_pullan_mc.h>
+
+#include "test_tools.h"
+
+using namespace lemon;
+
+char test_lgf[] =
+ "@nodes\n"
+ "label max_clique\n"
+ "1 0\n"
+ "2 0\n"
+ "3 0\n"
+ "4 1\n"
+ "5 1\n"
+ "6 1\n"
+ "7 1\n"
+ "@edges\n"
+ " label\n"
+ "1 2 1\n"
+ "1 3 2\n"
+ "1 4 3\n"
+ "1 6 4\n"
+ "2 3 5\n"
+ "2 5 6\n"
+ "2 7 7\n"
+ "3 4 8\n"
+ "3 5 9\n"
+ "4 5 10\n"
+ "4 6 11\n"
+ "4 7 12\n"
+ "5 6 13\n"
+ "5 7 14\n"
+ "6 7 15\n";
+
+
+// Check with general graphs
+template <typename Param>
+void checkMaxCliqueGeneral(Param rule) {
+ typedef ListGraph GR;
+ typedef GrossoLocatelliPullanMc<GR> McAlg;
+ typedef McAlg::CliqueNodeIt CliqueIt;
+
+ // Basic tests
+ {
+ GR g;
+ GR::NodeMap<bool> map(g);
+ McAlg mc(g);
+ mc.iterationLimit(50);
+ check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 0, "Wrong clique size");
+ check(CliqueIt(mc) == INVALID, "Wrong CliqueNodeIt");
+
+ GR::Node u = g.addNode();
+ check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 1, "Wrong clique size");
+ mc.cliqueMap(map);
+ check(map[u], "Wrong clique map");
+ CliqueIt it1(mc);
+ check(static_cast<GR::Node>(it1) == u && ++it1 == INVALID,
+ "Wrong CliqueNodeIt");
+
+ GR::Node v = g.addNode();
+ check(mc.run(rule) == McAlg::ITERATION_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 1, "Wrong clique size");
+ mc.cliqueMap(map);
+ check((map[u] && !map[v]) || (map[v] && !map[u]), "Wrong clique map");
+ CliqueIt it2(mc);
+ check(it2 != INVALID && ++it2 == INVALID, "Wrong CliqueNodeIt");
+
+ g.addEdge(u, v);
+ check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 2, "Wrong clique size");
+ mc.cliqueMap(map);
+ check(map[u] && map[v], "Wrong clique map");
+ CliqueIt it3(mc);
+ check(it3 != INVALID && ++it3 != INVALID && ++it3 == INVALID,
+ "Wrong CliqueNodeIt");
+ }
+
+ // Test graph
+ {
+ GR g;
+ GR::NodeMap<bool> max_clique(g);
+ GR::NodeMap<bool> map(g);
+ std::istringstream input(test_lgf);
+ graphReader(g, input)
+ .nodeMap("max_clique", max_clique)
+ .run();
+
+ McAlg mc(g);
+ mc.iterationLimit(50);
+ check(mc.run(rule) == McAlg::ITERATION_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 4, "Wrong clique size");
+ mc.cliqueMap(map);
+ for (GR::NodeIt n(g); n != INVALID; ++n) {
+ check(map[n] == max_clique[n], "Wrong clique map");
+ }
+ int cnt = 0;
+ for (CliqueIt n(mc); n != INVALID; ++n) {
+ cnt++;
+ check(map[n] && max_clique[n], "Wrong CliqueNodeIt");
+ }
+ check(cnt == 4, "Wrong CliqueNodeIt");
+ }
+}
+
+// Check with full graphs
+template <typename Param>
+void checkMaxCliqueFullGraph(Param rule) {
+ typedef FullGraph GR;
+ typedef GrossoLocatelliPullanMc<GR> McAlg;
+ typedef McAlg::CliqueNodeIt CliqueIt;
+
+ for (int size = 0; size <= 40; size = size * 3 + 1) {
+ GR g(size);
+ GR::NodeMap<bool> map(g);
+ McAlg mc(g);
+ check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == size, "Wrong clique size");
+ mc.cliqueMap(map);
+ for (GR::NodeIt n(g); n != INVALID; ++n) {
+ check(map[n], "Wrong clique map");
+ }
+ int cnt = 0;
+ for (CliqueIt n(mc); n != INVALID; ++n) cnt++;
+ check(cnt == size, "Wrong CliqueNodeIt");
+ }
+}
+
+// Check with grid graphs
+template <typename Param>
+void checkMaxCliqueGridGraph(Param rule) {
+ GridGraph g(5, 7);
+ GridGraph::NodeMap<bool> map(g);
+ GrossoLocatelliPullanMc<GridGraph> mc(g);
+
+ mc.iterationLimit(100);
+ check(mc.run(rule) == mc.ITERATION_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 2, "Wrong clique size");
+
+ mc.stepLimit(100);
+ check(mc.run(rule) == mc.STEP_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 2, "Wrong clique size");
+
+ mc.sizeLimit(2);
+ check(mc.run(rule) == mc.SIZE_LIMIT, "Wrong termination cause");
+ check(mc.cliqueSize() == 2, "Wrong clique size");
+}
+
+
+int main() {
+ checkMaxCliqueGeneral(GrossoLocatelliPullanMc<ListGraph>::RANDOM);
+ checkMaxCliqueGeneral(GrossoLocatelliPullanMc<ListGraph>::DEGREE_BASED);
+ checkMaxCliqueGeneral(GrossoLocatelliPullanMc<ListGraph>::PENALTY_BASED);
+
+ checkMaxCliqueFullGraph(GrossoLocatelliPullanMc<FullGraph>::RANDOM);
+ checkMaxCliqueFullGraph(GrossoLocatelliPullanMc<FullGraph>::DEGREE_BASED);
+ checkMaxCliqueFullGraph(GrossoLocatelliPullanMc<FullGraph>::PENALTY_BASED);
+
+ checkMaxCliqueGridGraph(GrossoLocatelliPullanMc<GridGraph>::RANDOM);
+ checkMaxCliqueGridGraph(GrossoLocatelliPullanMc<GridGraph>::DEGREE_BASED);
+ checkMaxCliqueGridGraph(GrossoLocatelliPullanMc<GridGraph>::PENALTY_BASED);
+
+ return 0;
+}
Index: test/nagamochi_ibaraki_test.cc
===================================================================
--- test/nagamochi_ibaraki_test.cc (revision 1017)
+++ test/nagamochi_ibaraki_test.cc (revision 1017)
@@ -0,0 +1,141 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#include
+
+#include
+#include
+#include ]