/* -*- mode: C++; indent-tabs-mode: nil; -*-
 *
 * This file is a part of LEMON, a generic C++ optimization library.
 *
 * Copyright (C) 2003-2010
 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
 * (Egervary Research Group on Combinatorial Optimization, EGRES).
 *
 * Permission to use, modify and distribute this software is granted
 * provided that this copyright notice appears in all copies. For
 * precise terms see the accompanying LICENSE file.
 *
 * This software is provided "AS IS" with no warranty of any kind,
 * express or implied, and with no claim as to its suitability for any
 * purpose.
 *
 */

#ifndef LEMON_COST_SCALING_H
#define LEMON_COST_SCALING_H

/// \ingroup min_cost_flow_algs
/// \file
/// \brief Cost scaling algorithm for finding a minimum cost flow.

#include <vector>
#include <deque>
#include <limits>

#include <lemon/core.h>
#include <lemon/maps.h>
#include <lemon/math.h>
#include <lemon/static_graph.h>
#include <lemon/circulation.h>
#include <lemon/bellman_ford.h>

namespace lemon {

  /// \brief Default traits class of CostScaling algorithm.
  ///
  /// Default traits class of CostScaling algorithm.
  /// \tparam GR Digraph type.
  /// \tparam V The number type used for flow amounts, capacity bounds
  /// and supply values. By default it is \c int.
  /// \tparam C The number type used for costs and potentials.
  /// By default it is the same as \c V.
#ifdef DOXYGEN
  template <typename GR, typename V, typename C>
#else
  template < typename GR, typename V = int, typename C = V,
             bool integer = std::numeric_limits<C>::is_integer >
#endif
  struct CostScalingDefaultTraits
  {
    /// The type of the digraph
    typedef GR Digraph;
    /// The type of the flow amounts, capacity bounds and supply values
    typedef V Value;
    /// The type of the arc costs
    typedef C Cost;

    /// \brief The large cost type used for internal computations
    ///
    /// The large cost type used for internal computations.
    /// It is \c long \c long if the \c Cost type is integer,
    /// otherwise it is \c double.
    /// \c Cost must be convertible to \c LargeCost.
    typedef double LargeCost;
  };

  // Default traits class for integer cost types
  template <typename GR, typename V, typename C>
  struct CostScalingDefaultTraits<GR, V, C, true>
  {
    typedef GR Digraph;
    typedef V Value;
    typedef C Cost;
#ifdef LEMON_HAVE_LONG_LONG
    typedef long long LargeCost;
#else
    typedef long LargeCost;
#endif
  };

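  // Illustration (not part of the library): with the traits classes above,
  // an integer Cost type selects a wide integer type for the internal
  // computations (long long if LEMON_HAVE_LONG_LONG is defined), while a
  // floating-point Cost type falls back to double. For example:
  //
  //   CostScalingDefaultTraits<ListDigraph, int, int>::LargeCost        // integer case
  //   CostScalingDefaultTraits<ListDigraph, double, double>::LargeCost  // double
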
  /// \addtogroup min_cost_flow_algs
  /// @{

  /// \brief Implementation of the Cost Scaling algorithm for
  /// finding a \ref min_cost_flow "minimum cost flow".
  ///
  /// \ref CostScaling implements a cost scaling algorithm that performs
  /// push/augment and relabel operations for finding a \ref min_cost_flow
  /// "minimum cost flow" \cite amo93networkflows, \cite goldberg90approximation,
  /// \cite goldberg97efficient, \cite bunnagel98efficient.
  /// It is a highly efficient primal-dual solution method, which
  /// can be viewed as the generalization of the \ref Preflow
  /// "preflow push-relabel" algorithm for the maximum flow problem.
  /// It is a polynomial algorithm, its running time complexity is
  /// \f$O(n^2e\log(nK))\f$, where K denotes the maximum arc cost.
  ///
  /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
  /// implementations available in LEMON for solving this problem.
  /// (For more information, see \ref min_cost_flow_algs "the module page".)
  ///
  /// Most of the parameters of the problem (except for the digraph)
  /// can be given using separate functions, and the algorithm can be
  /// executed using the \ref run() function. If some parameters are not
  /// specified, then default values will be used.
  ///
  /// \tparam GR The digraph type the algorithm runs on.
  /// \tparam V The number type used for flow amounts, capacity bounds
  /// and supply values in the algorithm. By default, it is \c int.
  /// \tparam C The number type used for costs and potentials in the
  /// algorithm. By default, it is the same as \c V.
  /// \tparam TR The traits class that defines various types used by the
  /// algorithm. By default, it is \ref CostScalingDefaultTraits
  /// "CostScalingDefaultTraits<GR, V, C>".
  /// In most cases, this parameter should not be set directly,
  /// consider using the named template parameters instead.
  ///
  /// \warning Both \c V and \c C must be signed number types.
  /// \warning All input data (capacities, supply values, and costs) must
  /// be integer.
  /// \warning This algorithm does not support negative costs for
  /// arcs having infinite upper bound.
  ///
  /// \note %CostScaling provides three different internal methods,
  /// from which the most efficient one is used by default.
  /// For more information, see \ref Method.
#ifdef DOXYGEN
  template <typename GR, typename V, typename C, typename TR>
#else
  template < typename GR, typename V = int, typename C = V,
             typename TR = CostScalingDefaultTraits<GR, V, C> >
#endif
  class CostScaling
  {
  public:

    /// The type of the digraph
    typedef typename TR::Digraph Digraph;
    /// The type of the flow amounts, capacity bounds and supply values
    typedef typename TR::Value Value;
    /// The type of the arc costs
    typedef typename TR::Cost Cost;

    /// \brief The large cost type
    ///
    /// The large cost type used for internal computations.
    /// By default, it is \c long \c long if the \c Cost type is integer,
    /// otherwise it is \c double.
    typedef typename TR::LargeCost LargeCost;

    /// The \ref CostScalingDefaultTraits "traits class" of the algorithm
    typedef TR Traits;

  public:

    /// \brief Problem type constants for the \c run() function.
    ///
    /// Enum type containing the problem type constants that can be
    /// returned by the \ref run() function of the algorithm.
    enum ProblemType {
      /// The problem has no feasible solution (flow).
      INFEASIBLE,
      /// The problem has optimal solution (i.e. it is feasible and
      /// bounded), and the algorithm has found optimal flow and node
      /// potentials (primal and dual solutions).
      OPTIMAL,
      /// The digraph contains an arc of negative cost and infinite
      /// upper bound. It means that the objective function is unbounded
      /// on that arc, however, note that it could actually be bounded
      /// over the feasible flows, but this algorithm cannot handle
      /// these cases.
      UNBOUNDED
    };

    /// \brief Constants for selecting the internal method.
    ///
    /// Enum type containing constants for selecting the internal method
    /// for the \ref run() function.
    ///
    /// \ref CostScaling provides three internal methods that differ mainly
    /// in their base operations, which are used in conjunction with the
    /// relabel operation.
    /// By default, the so called \ref PARTIAL_AUGMENT
    /// "Partial Augment-Relabel" method is used, which turned out to be
    /// the most efficient and the most robust on various test inputs.
    /// However, the other methods can be selected using the \ref run()
    /// function with the proper parameter.
    enum Method {
      /// Local push operations are used, i.e. flow is moved only on one
      /// admissible arc at once.
      PUSH,
      /// Augment operations are used, i.e. flow is moved on admissible
      /// paths from a node with excess to a node with deficit.
      AUGMENT,
      /// Partial augment operations are used, i.e. flow is moved on
      /// admissible paths started from a node with excess, but the
      /// lengths of these paths are limited. This method can be viewed
      /// as a combined version of the previous two operations.
      PARTIAL_AUGMENT
    };
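    // Illustration (not part of the library): the internal method is chosen
    // via the first parameter of run(), assuming a CostScaling<ListDigraph>
    // instance `cs` whose parameters have already been set:
    //
    //   cs.run();                                    // default: PARTIAL_AUGMENT
    //   cs.run(CostScaling<ListDigraph>::AUGMENT);   // full augment-relabel
    //   cs.run(CostScaling<ListDigraph>::PUSH);      // push-relabel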

  private:

    TEMPLATE_DIGRAPH_TYPEDEFS(GR);

    typedef std::vector<int> IntVector;
    typedef std::vector<Value> ValueVector;
    typedef std::vector<Cost> CostVector;
    typedef std::vector<LargeCost> LargeCostVector;
    typedef std::vector<char> BoolVector;
    // Note: vector<char> is used instead of vector<bool> for efficiency reasons

  private:

    template <typename KT, typename VT>
    class StaticVectorMap {
    public:
      typedef KT Key;
      typedef VT Value;

      StaticVectorMap(std::vector<Value>& v) : _v(v) {}

      const Value& operator[](const Key& key) const {
        return _v[StaticDigraph::id(key)];
      }

      Value& operator[](const Key& key) {
        return _v[StaticDigraph::id(key)];
      }

      void set(const Key& key, const Value& val) {
        _v[StaticDigraph::id(key)] = val;
      }

    private:
      std::vector<Value>& _v;
    };

    typedef StaticVectorMap<StaticDigraph::Arc, LargeCost> LargeCostArcMap;

  private:

    // Data related to the underlying digraph
    const GR &_graph;
    int _node_num;
    int _arc_num;
    int _res_node_num;
    int _res_arc_num;
    int _root;

    // Parameters of the problem
    bool _have_lower;
    Value _sum_supply;
    int _sup_node_num;

    // Data structures for storing the digraph
    IntNodeMap _node_id;
    IntArcMap _arc_idf;
    IntArcMap _arc_idb;
    IntVector _first_out;
    BoolVector _forward;
    IntVector _source;
    IntVector _target;
    IntVector _reverse;

    // Node and arc data
    ValueVector _lower;
    ValueVector _upper;
    CostVector _scost;
    ValueVector _supply;

    ValueVector _res_cap;
    LargeCostVector _cost;
    LargeCostVector _pi;
    ValueVector _excess;
    IntVector _next_out;
    std::deque<int> _active_nodes;

    // Data for scaling
    LargeCost _epsilon;
    int _alpha;

    IntVector _buckets;
    IntVector _bucket_next;
    IntVector _bucket_prev;
    IntVector _rank;
    int _max_rank;

  public:

    /// \brief Constant for infinite upper bounds (capacities).
    ///
    /// Constant for infinite upper bounds (capacities).
    /// It is \c std::numeric_limits<Value>::infinity() if available,
    /// \c std::numeric_limits<Value>::max() otherwise.
    const Value INF;

  public:

    /// \name Named Template Parameters
    /// @{

    template <typename T>
    struct SetLargeCostTraits : public Traits {
      typedef T LargeCost;
    };

    /// \brief \ref named-templ-param "Named parameter" for setting
    /// \c LargeCost type.
    ///
    /// \ref named-templ-param "Named parameter" for setting \c LargeCost
    /// type, which is used for internal computations in the algorithm.
    /// \c Cost must be convertible to \c LargeCost.
    template <typename T>
    struct SetLargeCost
      : public CostScaling<GR, V, C, SetLargeCostTraits<T> > {
      typedef CostScaling<GR, V, C, SetLargeCostTraits<T> > Create;
    };

    /// @}

  protected:

    CostScaling() {}

  public:

    /// \brief Constructor.
    ///
    /// The constructor of the class.
    ///
    /// \param graph The digraph the algorithm runs on.
    CostScaling(const GR& graph) :
      _graph(graph), _node_id(graph), _arc_idf(graph), _arc_idb(graph),
      INF(std::numeric_limits<Value>::has_infinity ?
          std::numeric_limits<Value>::infinity() :
          std::numeric_limits<Value>::max())
    {
      // Check the number types
      LEMON_ASSERT(std::numeric_limits<Value>::is_signed,
        "The flow type of CostScaling must be signed");
      LEMON_ASSERT(std::numeric_limits<Cost>::is_signed,
        "The cost type of CostScaling must be signed");

      // Reset data structures
      reset();
    }

    /// \name Parameters
    /// The parameters of the algorithm can be specified using these
    /// functions.

    /// @{

    /// \brief Set the lower bounds on the arcs.
    ///
    /// This function sets the lower bounds on the arcs.
    /// If it is not used before calling \ref run(), the lower bounds
    /// will be set to zero on all arcs.
    ///
    /// \param map An arc map storing the lower bounds.
    /// Its \c Value type must be convertible to the \c Value type
    /// of the algorithm.
    ///
    /// \return (*this)
    template <typename LowerMap>
    CostScaling& lowerMap(const LowerMap& map) {
      _have_lower = true;
      for (ArcIt a(_graph); a != INVALID; ++a) {
        _lower[_arc_idf[a]] = map[a];
        _lower[_arc_idb[a]] = map[a];
      }
      return *this;
    }

    /// \brief Set the upper bounds (capacities) on the arcs.
    ///
    /// This function sets the upper bounds (capacities) on the arcs.
    /// If it is not used before calling \ref run(), the upper bounds
    /// will be set to \ref INF on all arcs (i.e. the flow value will be
    /// unbounded from above).
    ///
    /// \param map An arc map storing the upper bounds.
    /// Its \c Value type must be convertible to the \c Value type
    /// of the algorithm.
    ///
    /// \return (*this)
    template <typename UpperMap>
    CostScaling& upperMap(const UpperMap& map) {
      for (ArcIt a(_graph); a != INVALID; ++a) {
        _upper[_arc_idf[a]] = map[a];
      }
      return *this;
    }

    /// \brief Set the costs of the arcs.
    ///
    /// This function sets the costs of the arcs.
    /// If it is not used before calling \ref run(), the costs
    /// will be set to \c 1 on all arcs.
    ///
    /// \param map An arc map storing the costs.
    /// Its \c Value type must be convertible to the \c Cost type
    /// of the algorithm.
    ///
    /// \return (*this)
    template <typename CostMap>
    CostScaling& costMap(const CostMap& map) {
      for (ArcIt a(_graph); a != INVALID; ++a) {
        _scost[_arc_idf[a]] =  map[a];
        _scost[_arc_idb[a]] = -map[a];
      }
      return *this;
    }

    /// \brief Set the supply values of the nodes.
    ///
    /// This function sets the supply values of the nodes.
    /// If neither this function nor \ref stSupply() is used before
    /// calling \ref run(), the supply of each node will be set to zero.
    ///
    /// \param map A node map storing the supply values.
    /// Its \c Value type must be convertible to the \c Value type
    /// of the algorithm.
    ///
    /// \return (*this)
    template <typename SupplyMap>
    CostScaling& supplyMap(const SupplyMap& map) {
      for (NodeIt n(_graph); n != INVALID; ++n) {
        _supply[_node_id[n]] = map[n];
      }
      return *this;
    }

    /// \brief Set single source and target nodes and a supply value.
    ///
    /// This function sets a single source node and a single target node
    /// and the required flow value.
    /// If neither this function nor \ref supplyMap() is used before
    /// calling \ref run(), the supply of each node will be set to zero.
    ///
    /// Using this function has the same effect as using \ref supplyMap()
    /// with a map in which \c k is assigned to \c s, \c -k is
    /// assigned to \c t and all other nodes have zero supply value.
    ///
    /// \param s The source node.
    /// \param t The target node.
    /// \param k The required amount of flow from node \c s to node \c t
    /// (i.e. the supply of \c s and the demand of \c t).
    ///
    /// \return (*this)
    CostScaling& stSupply(const Node& s, const Node& t, Value k) {
      for (int i = 0; i != _res_node_num; ++i) {
        _supply[i] = 0;
      }
      _supply[_node_id[s]] =  k;
      _supply[_node_id[t]] = -k;
      return *this;
    }

    /// @}
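    // A minimal usage sketch (illustration only, not part of the library).
    // It assumes <lemon/list_graph.h> is included in addition to the headers
    // above; the node, arc, and map names are made up for the example:
    //
    //   ListDigraph g;
    //   ListDigraph::Node s = g.addNode(), t = g.addNode();
    //   ListDigraph::Arc  a = g.addArc(s, t);
    //   ListDigraph::ArcMap<int> cap(g), cost(g);
    //   cap[a] = 100; cost[a] = 5;
    //
    //   CostScaling<ListDigraph> cs(g);
    //   cs.upperMap(cap).costMap(cost).stSupply(s, t, 80);
    //   CostScaling<ListDigraph>::ProblemType res = cs.run();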

    /// \name Execution control
    /// The algorithm can be executed using \ref run().

    /// @{

    /// \brief Run the algorithm.
    ///
    /// This function runs the algorithm.
    /// The parameters can be specified using functions \ref lowerMap(),
    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply().
    /// For example,
    /// \code
    ///   CostScaling<ListDigraph> cs(graph);
    ///   cs.lowerMap(lower).upperMap(upper).costMap(cost)
    ///     .supplyMap(sup).run();
    /// \endcode
    ///
    /// This function can be called more than once. All the given parameters
    /// are kept for the next call, unless \ref resetParams() or \ref reset()
    /// is used, thus only the modified parameters have to be set again.
    /// If the underlying digraph was also modified after the construction
    /// of the class (or the last \ref reset() call), then the \ref reset()
    /// function must be called.
    ///
    /// \param method The internal method that will be used in the
    /// algorithm. For more information, see \ref Method.
    /// \param factor The cost scaling factor. It must be at least two.
    ///
    /// \return \c INFEASIBLE if no feasible flow exists,
    /// \n \c OPTIMAL if the problem has optimal solution
    /// (i.e. it is feasible and bounded), and the algorithm has found
    /// optimal flow and node potentials (primal and dual solutions),
    /// \n \c UNBOUNDED if the digraph contains an arc of negative cost
    /// and infinite upper bound. It means that the objective function
    /// is unbounded on that arc, however, note that it could actually be
    /// bounded over the feasible flows, but this algorithm cannot handle
    /// these cases.
    ///
    /// \see ProblemType, Method
    /// \see resetParams(), reset()
    ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 16) {
      LEMON_ASSERT(factor >= 2, "The scaling factor must be at least 2");
      _alpha = factor;
      ProblemType pt = init();
      if (pt != OPTIMAL) return pt;
      start(method);
      return OPTIMAL;
    }

    /// \brief Reset all the parameters that have been given before.
    ///
    /// This function resets all the parameters that have been given
    /// before using functions \ref lowerMap(), \ref upperMap(),
    /// \ref costMap(), \ref supplyMap(), \ref stSupply().
    ///
    /// It is useful for multiple \ref run() calls. Basically, all the given
    /// parameters are kept for the next \ref run() call, unless
    /// \ref resetParams() or \ref reset() is used.
    /// If the underlying digraph was also modified after the construction
    /// of the class or the last \ref reset() call, then the \ref reset()
    /// function must be used, otherwise \ref resetParams() is sufficient.
    ///
    /// For example,
    /// \code
    ///   CostScaling<ListDigraph> cs(graph);
    ///
    ///   // First run
    ///   cs.lowerMap(lower).upperMap(upper).costMap(cost)
    ///     .supplyMap(sup).run();
    ///
    ///   // Run again with modified cost map (resetParams() is not called,
    ///   // so only the cost map has to be set again)
    ///   cost[e] += 100;
    ///   cs.costMap(cost).run();
    ///
    ///   // Run again from scratch using resetParams()
    ///   // (the lower bounds will be set to zero on all arcs)
    ///   cs.resetParams();
    ///   cs.upperMap(capacity).costMap(cost)
    ///     .supplyMap(sup).run();
    /// \endcode
    ///
    /// \return (*this)
    ///
    /// \see reset(), run()
    CostScaling& resetParams() {
      for (int i = 0; i != _res_node_num; ++i) {
        _supply[i] = 0;
      }
      int limit = _first_out[_root];
      for (int j = 0; j != limit; ++j) {
        _lower[j] = 0;
        _upper[j] = INF;
        _scost[j] = _forward[j] ? 1 : -1;
      }
      for (int j = limit; j != _res_arc_num; ++j) {
        _lower[j] = 0;
        _upper[j] = INF;
        _scost[j] = 0;
        _scost[_reverse[j]] = 0;
      }
      _have_lower = false;
      return *this;
    }

    /// \brief Reset the internal data structures and all the parameters
    /// that have been given before.
    ///
    /// This function resets the internal data structures and all the
    /// parameters that have been given before using functions \ref lowerMap(),
    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply().
    ///
    /// It is useful for multiple \ref run() calls. By default, all the given
    /// parameters are kept for the next \ref run() call, unless
    /// \ref resetParams() or \ref reset() is used.
    /// If the underlying digraph was also modified after the construction
    /// of the class or the last \ref reset() call, then the \ref reset()
    /// function must be used, otherwise \ref resetParams() is sufficient.
    ///
    /// See \ref resetParams() for examples.
    ///
    /// \return (*this)
    ///
    /// \see resetParams(), run()
    CostScaling& reset() {
      // Resize vectors
      _node_num = countNodes(_graph);
      _arc_num = countArcs(_graph);
      _res_node_num = _node_num + 1;
      _res_arc_num = 2 * (_arc_num + _node_num);
      _root = _node_num;

      _first_out.resize(_res_node_num + 1);
      _forward.resize(_res_arc_num);
      _source.resize(_res_arc_num);
      _target.resize(_res_arc_num);
      _reverse.resize(_res_arc_num);

      _lower.resize(_res_arc_num);
      _upper.resize(_res_arc_num);
      _scost.resize(_res_arc_num);
      _supply.resize(_res_node_num);

      _res_cap.resize(_res_arc_num);
      _cost.resize(_res_arc_num);
      _pi.resize(_res_node_num);
      _excess.resize(_res_node_num);
      _next_out.resize(_res_node_num);

      // Copy the graph
      int i = 0, j = 0, k = 2 * _arc_num + _node_num;
      for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
        _node_id[n] = i;
      }
      i = 0;
      for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
        _first_out[i] = j;
        for (OutArcIt a(_graph, n); a != INVALID; ++a, ++j) {
          _arc_idf[a] = j;
          _forward[j] = true;
          _source[j] = i;
          _target[j] = _node_id[_graph.runningNode(a)];
        }
        for (InArcIt a(_graph, n); a != INVALID; ++a, ++j) {
          _arc_idb[a] = j;
          _forward[j] = false;
          _source[j] = i;
          _target[j] = _node_id[_graph.runningNode(a)];
        }
        _forward[j] = false;
        _source[j] = i;
        _target[j] = _root;
        _reverse[j] = k;
        _forward[k] = true;
        _source[k] = _root;
        _target[k] = i;
        _reverse[k] = j;
        ++j; ++k;
      }
      _first_out[i] = j;
      _first_out[_res_node_num] = k;
      for (ArcIt a(_graph); a != INVALID; ++a) {
        int fi = _arc_idf[a];
        int bi = _arc_idb[a];
        _reverse[fi] = bi;
        _reverse[bi] = fi;
      }

      // Reset parameters
      resetParams();
      return *this;
    }

    /// @}
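    // Illustration (not part of the library): if the digraph itself changes
    // after construction, reset() must be called and every parameter has to
    // be given again (`g`, `u`, `v`, `cap`, `cost`, `sup` are user-defined):
    //
    //   g.addArc(u, v);
    //   cs.reset();
    //   cs.upperMap(cap).costMap(cost).supplyMap(sup).run();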

    /// \name Query Functions
    /// The results of the algorithm can be obtained using these
    /// functions.\n
    /// The \ref run() function must be called before using them.

    /// @{

    /// \brief Return the total cost of the found flow.
    ///
    /// This function returns the total cost of the found flow.
    /// Its complexity is O(e).
    ///
    /// \note The return type of the function can be specified as a
    /// template parameter. For example,
    /// \code
    ///   cs.totalCost<double>();
    /// \endcode
    /// It is useful if the total cost cannot be stored in the \c Cost
    /// type of the algorithm, which is the default return type of the
    /// function.
    ///
    /// \pre \ref run() must be called before using this function.
    template <typename Number>
    Number totalCost() const {
      Number c = 0;
      for (ArcIt a(_graph); a != INVALID; ++a) {
        int i = _arc_idb[a];
        c += static_cast<Number>(_res_cap[i]) *
             (-static_cast<Number>(_scost[i]));
      }
      return c;
    }

#ifndef DOXYGEN
    Cost totalCost() const {
      return totalCost<Cost>();
    }
#endif

    /// \brief Return the flow on the given arc.
    ///
    /// This function returns the flow on the given arc.
    ///
    /// \pre \ref run() must be called before using this function.
    Value flow(const Arc& a) const {
      return _res_cap[_arc_idb[a]];
    }

    /// \brief Copy the flow values (the primal solution) into the
    /// given map.
    ///
    /// This function copies the flow value on each arc into the given
    /// map. The \c Value type of the algorithm must be convertible to
    /// the \c Value type of the map.
    ///
    /// \pre \ref run() must be called before using this function.
    template <typename FlowMap>
    void flowMap(FlowMap &map) const {
      for (ArcIt a(_graph); a != INVALID; ++a) {
        map.set(a, _res_cap[_arc_idb[a]]);
      }
    }

    /// \brief Return the potential (dual value) of the given node.
    ///
    /// This function returns the potential (dual value) of the
    /// given node.
    ///
    /// \pre \ref run() must be called before using this function.
    Cost potential(const Node& n) const {
      return static_cast<Cost>(_pi[_node_id[n]]);
    }

    /// \brief Copy the potential values (the dual solution) into the
    /// given map.
    ///
    /// This function copies the potential (dual value) of each node
    /// into the given map.
    /// The \c Cost type of the algorithm must be convertible to the
    /// \c Value type of the map.
    ///
    /// \pre \ref run() must be called before using this function.
    template <typename PotentialMap>
    void potentialMap(PotentialMap &map) const {
      for (NodeIt n(_graph); n != INVALID; ++n) {
        map.set(n, static_cast<Cost>(_pi[_node_id[n]]));
      }
    }

    /// @}
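    // Sketch of reading the results (illustration only; `g` is the digraph
    // and `cs` a CostScaling instance on which run() returned OPTIMAL):
    //
    //   ListDigraph::ArcMap<int>  flow(g);
    //   ListDigraph::NodeMap<int> pot(g);
    //   int total = cs.totalCost();   // total cost of the found flow
    //   cs.flowMap(flow);             // primal solution (flow values)
    //   cs.potentialMap(pot);         // dual solution (node potentials)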

  private:

    // Initialize the algorithm
    ProblemType init() {
      if (_res_node_num <= 1) return INFEASIBLE;

      // Check the sum of supply values
      _sum_supply = 0;
      for (int i = 0; i != _root; ++i) {
        _sum_supply += _supply[i];
      }
      if (_sum_supply > 0) return INFEASIBLE;

      // Initialize vectors
      for (int i = 0; i != _res_node_num; ++i) {
        _pi[i] = 0;
        _excess[i] = _supply[i];
      }

      // Remove infinite upper bounds and check negative arcs
      const Value MAX = std::numeric_limits<Value>::max();
      int last_out;
      if (_have_lower) {
        for (int i = 0; i != _root; ++i) {
          last_out = _first_out[i+1];
          for (int j = _first_out[i]; j != last_out; ++j) {
            if (_forward[j]) {
              Value c = _scost[j] < 0 ? _upper[j] : _lower[j];
              if (c >= MAX) return UNBOUNDED;
              _excess[i] -= c;
              _excess[_target[j]] += c;
            }
          }
        }
      } else {
        for (int i = 0; i != _root; ++i) {
          last_out = _first_out[i+1];
          for (int j = _first_out[i]; j != last_out; ++j) {
            if (_forward[j] && _scost[j] < 0) {
              Value c = _upper[j];
              if (c >= MAX) return UNBOUNDED;
              _excess[i] -= c;
              _excess[_target[j]] += c;
            }
          }
        }
      }
      Value ex, max_cap = 0;
      for (int i = 0; i != _res_node_num; ++i) {
        ex = _excess[i];
        _excess[i] = 0;
        if (ex < 0) max_cap -= ex;
      }
      for (int j = 0; j != _res_arc_num; ++j) {
        if (_upper[j] >= MAX) _upper[j] = max_cap;
      }

      // Initialize the large cost vector and the epsilon parameter
      _epsilon = 0;
      LargeCost lc;
      for (int i = 0; i != _root; ++i) {
        last_out = _first_out[i+1];
        for (int j = _first_out[i]; j != last_out; ++j) {
          lc = static_cast<LargeCost>(_scost[j]) * _res_node_num * _alpha;
          _cost[j] = lc;
          if (lc > _epsilon) _epsilon = lc;
        }
      }
      _epsilon /= _alpha;

      // Initialize maps for Circulation and remove non-zero lower bounds
      ConstMap<Arc, Value> low(0);
      typedef typename Digraph::template ArcMap<Value> ValueArcMap;
      typedef typename Digraph::template NodeMap<Value> ValueNodeMap;
      ValueArcMap cap(_graph), flow(_graph);
      ValueNodeMap sup(_graph);
      for (NodeIt n(_graph); n != INVALID; ++n) {
        sup[n] = _supply[_node_id[n]];
      }
      if (_have_lower) {
        for (ArcIt a(_graph); a != INVALID; ++a) {
          int j = _arc_idf[a];
          Value c = _lower[j];
          cap[a] = _upper[j] - c;
          sup[_graph.source(a)] -= c;
          sup[_graph.target(a)] += c;
        }
      } else {
        for (ArcIt a(_graph); a != INVALID; ++a) {
          cap[a] = _upper[_arc_idf[a]];
        }
      }

      _sup_node_num = 0;
      for (NodeIt n(_graph); n != INVALID; ++n) {
        if (sup[n] > 0) ++_sup_node_num;
      }

      // Find a feasible flow using Circulation
      Circulation<Digraph, ConstMap<Arc, Value>, ValueArcMap, ValueNodeMap>
        circ(_graph, low, cap, sup);
      if (!circ.flowMap(flow).run()) return INFEASIBLE;

      // Set residual capacities and handle GEQ supply type
      if (_sum_supply < 0) {
        for (ArcIt a(_graph); a != INVALID; ++a) {
          Value fa = flow[a];
          _res_cap[_arc_idf[a]] = cap[a] - fa;
          _res_cap[_arc_idb[a]] = fa;
          sup[_graph.source(a)] -= fa;
          sup[_graph.target(a)] += fa;
        }
        for (NodeIt n(_graph); n != INVALID; ++n) {
          _excess[_node_id[n]] = sup[n];
        }
        for (int a = _first_out[_root]; a != _res_arc_num; ++a) {
          int u = _target[a];
          int ra = _reverse[a];
          _res_cap[a] = -_sum_supply + 1;
          _res_cap[ra] = -_excess[u];
          _cost[a] = 0;
          _cost[ra] = 0;
          _excess[u] = 0;
        }
      } else {
        for (ArcIt a(_graph); a != INVALID; ++a) {
          Value fa = flow[a];
          _res_cap[_arc_idf[a]] = cap[a] - fa;
          _res_cap[_arc_idb[a]] = fa;
        }
        for (int a = _first_out[_root]; a != _res_arc_num; ++a) {
          int ra = _reverse[a];
          _res_cap[a] = 0;
          _res_cap[ra] = 0;
          _cost[a] = 0;
          _cost[ra] = 0;
        }
      }

      // Initialize data structures for buckets
      _max_rank = _alpha * _res_node_num;
      _buckets.resize(_max_rank);
      _bucket_next.resize(_res_node_num + 1);
      _bucket_prev.resize(_res_node_num + 1);
      _rank.resize(_res_node_num + 1);

      return OPTIMAL;
    }

    // Execute the algorithm and transform the results
    void start(Method method) {
      const int MAX_PARTIAL_PATH_LENGTH = 4;

      switch (method) {
        case PUSH:
          startPush();
          break;
        case AUGMENT:
          startAugment(_res_node_num - 1);
          break;
        case PARTIAL_AUGMENT:
          startAugment(MAX_PARTIAL_PATH_LENGTH);
          break;
      }

      // Compute node potentials (dual solution)
      for (int i = 0; i != _res_node_num; ++i) {
        _pi[i] = static_cast<Cost>(_pi[i] / (_res_node_num * _alpha));
      }
      bool optimal = true;
      for (int i = 0; optimal && i != _res_node_num; ++i) {
        LargeCost pi_i = _pi[i];
        int last_out = _first_out[i+1];
        for (int j = _first_out[i]; j != last_out; ++j) {
          if (_res_cap[j] > 0 && _scost[j] + pi_i - _pi[_target[j]] < 0) {
            optimal = false;
            break;
          }
        }
      }

      if (!optimal) {
        // Compute node potentials for the original costs with BellmanFord
        // (if it is necessary)
        typedef std::pair<int, int> IntPair;
        StaticDigraph sgr;
        std::vector<IntPair> arc_vec;
        std::vector<LargeCost> cost_vec;
        LargeCostArcMap cost_map(cost_vec);

        arc_vec.clear();
        cost_vec.clear();
        for (int j = 0; j != _res_arc_num; ++j) {
          if (_res_cap[j] > 0) {
            int u = _source[j], v = _target[j];
            arc_vec.push_back(IntPair(u, v));
            cost_vec.push_back(_scost[j] + _pi[u] - _pi[v]);
          }
        }
        sgr.build(_res_node_num, arc_vec.begin(), arc_vec.end());

        typename BellmanFord<StaticDigraph, LargeCostArcMap>::Create
          bf(sgr, cost_map);
        bf.init(0);
        bf.start();

        for (int i = 0; i != _res_node_num; ++i) {
          _pi[i] += bf.dist(sgr.node(i));
        }
      }

      // Shift potentials to meet the requirements of the GEQ type
      // optimality conditions
      LargeCost max_pot = _pi[_root];
      for (int i = 0; i != _res_node_num; ++i) {
        if (_pi[i] > max_pot) max_pot = _pi[i];
      }
      if (max_pot != 0) {
        for (int i = 0; i != _res_node_num; ++i) {
          _pi[i] -= max_pot;
        }
      }

      // Handle non-zero lower bounds
      if (_have_lower) {
        int limit = _first_out[_root];
        for (int j = 0; j != limit; ++j) {
          if (!_forward[j]) _res_cap[j] += _lower[j];
        }
      }
    }

    // Initialize a cost scaling phase
    void initPhase() {
      // Saturate arcs not satisfying the optimality condition
      for (int u = 0; u != _res_node_num; ++u) {
        int last_out = _first_out[u+1];
        LargeCost pi_u = _pi[u];
        for (int a = _first_out[u]; a != last_out; ++a) {
          Value delta = _res_cap[a];
          if (delta > 0) {
            int v = _target[a];
            if (_cost[a] + pi_u - _pi[v] < 0) {
              _excess[u] -= delta;
              _excess[v] += delta;
              _res_cap[a] = 0;
              _res_cap[_reverse[a]] += delta;
            }
          }
        }
      }

      // Find active nodes (i.e. nodes with positive excess)
      for (int u = 0; u != _res_node_num; ++u) {
        if (_excess[u] > 0) _active_nodes.push_back(u);
      }

      // Initialize the next arcs
      for (int u = 0; u != _res_node_num; ++u) {
        _next_out[u] = _first_out[u];
      }
    }

    // Price (potential) refinement heuristic
    bool priceRefinement() {

      // Stack for storing the topological order
      IntVector stack(_res_node_num);
      int stack_top;

      // Perform phases
      while (topologicalSort(stack, stack_top)) {

        // Compute node ranks in the acyclic admissible network and
        // store the nodes in buckets
        for (int i = 0; i != _res_node_num; ++i) {
          _rank[i] = 0;
        }
        const int bucket_end = _root + 1;
        for (int r = 0; r != _max_rank; ++r) {
          _buckets[r] = bucket_end;
        }
        int top_rank = 0;
        for ( ; stack_top >= 0; --stack_top) {
          int u = stack[stack_top], v;
          int rank_u = _rank[u];

          LargeCost rc, pi_u = _pi[u];
          int last_out = _first_out[u+1];
          for (int a = _first_out[u]; a != last_out; ++a) {
            if (_res_cap[a] > 0) {
              v = _target[a];
              rc = _cost[a] + pi_u - _pi[v];
              if (rc < 0) {
                LargeCost nrc = static_cast<LargeCost>((-rc - 0.5) / _epsilon);
                if (nrc < LargeCost(_max_rank)) {
                  int new_rank_v = rank_u + static_cast<int>(nrc);
                  if (new_rank_v > _rank[v]) {
                    _rank[v] = new_rank_v;
                  }
                }
              }
            }
          }

          if (rank_u > 0) {
            top_rank = std::max(top_rank, rank_u);
            int bfirst = _buckets[rank_u];
            _bucket_next[u] = bfirst;
            _bucket_prev[bfirst] = u;
            _buckets[rank_u] = u;
          }
        }

        // Check if the current flow is epsilon-optimal
        if (top_rank == 0) {
          return true;
        }

        // Process buckets in top-down order
        for (int rank = top_rank; rank > 0; --rank) {
          while (_buckets[rank] != bucket_end) {
            // Remove the first node from the current bucket
            int u = _buckets[rank];
            _buckets[rank] = _bucket_next[u];

            // Search the outgoing arcs of u
            LargeCost rc, pi_u = _pi[u];
            int last_out = _first_out[u+1];
            int v, old_rank_v, new_rank_v;
            for (int a = _first_out[u]; a != last_out; ++a) {
              if (_res_cap[a] > 0) {
                v = _target[a];
                old_rank_v = _rank[v];

                if (old_rank_v < rank) {

                  // Compute the new rank of node v
                  rc = _cost[a] + pi_u - _pi[v];
                  if (rc < 0) {
                    new_rank_v = rank;
                  } else {
                    LargeCost nrc = rc / _epsilon;
                    new_rank_v = 0;
                    if (nrc < LargeCost(_max_rank)) {
                      new_rank_v = rank - 1 - static_cast<int>(nrc);
                    }
                  }

                  // Change the rank of node v
                  if (new_rank_v > old_rank_v) {
                    _rank[v] = new_rank_v;

                    // Remove v from its old bucket
                    if (old_rank_v > 0) {
                      if (_buckets[old_rank_v] == v) {
                        _buckets[old_rank_v] = _bucket_next[v];
                      } else {
                        int pv = _bucket_prev[v], nv = _bucket_next[v];
                        _bucket_next[pv] = nv;
                        _bucket_prev[nv] = pv;
                      }
                    }

                    // Insert v into its new bucket
                    int nv = _buckets[new_rank_v];
                    _bucket_next[v] = nv;
                    _bucket_prev[nv] = v;
                    _buckets[new_rank_v] = v;
                  }
                }
              }
            }

            // Refine potential of node u
            _pi[u] -= rank * _epsilon;
          }
        }

      }

      return false;
    }

    // Find and cancel cycles in the admissible network and
    // determine topological order using DFS
    bool topologicalSort(IntVector &stack, int &stack_top) {
      const int MAX_CYCLE_CANCEL = 1;

      BoolVector reached(_res_node_num, false);
      BoolVector processed(_res_node_num, false);
      IntVector pred(_res_node_num);
      for (int i = 0; i != _res_node_num; ++i) {
        _next_out[i] = _first_out[i];
      }
      stack_top = -1;

      int cycle_cnt = 0;
      for (int start = 0; start != _res_node_num; ++start) {
        if (reached[start]) continue;

        // Start DFS search from this start node
        pred[start] = -1;
        int tip = start, v;
        while (true) {
          // Check the outgoing arcs of the current tip node
          reached[tip] = true;
          LargeCost pi_tip = _pi[tip];
          int a, last_out = _first_out[tip+1];
          for (a = _next_out[tip]; a != last_out; ++a) {
            if (_res_cap[a] > 0) {
              v = _target[a];
              if (_cost[a] + pi_tip - _pi[v] < 0) {
                if (!reached[v]) {
                  // A new node is reached
                  reached[v] = true;
                  pred[v] = tip;
                  _next_out[tip] = a;
                  tip = v;
                  a = _next_out[tip];
                  last_out = _first_out[tip+1];
                  break;
                }
                else if (!processed[v]) {
                  // A cycle is found
                  ++cycle_cnt;
                  _next_out[tip] = a;

                  // Find the minimum residual capacity along the cycle
                  Value d, delta = _res_cap[a];
                  int u, delta_node = tip;
                  for (u = tip; u != v; ) {
                    u = pred[u];
                    d = _res_cap[_next_out[u]];
                    if (d <= delta) {
                      delta = d;
                      delta_node = u;
                    }
                  }

                  // Augment along the cycle
                  _res_cap[a] -= delta;
                  _res_cap[_reverse[a]] += delta;
                  for (u = tip; u != v; ) {
                    u = pred[u];
                    int ca = _next_out[u];
                    _res_cap[ca] -= delta;
                    _res_cap[_reverse[ca]] += delta;
                  }

                  // Check the maximum number of cycle canceling
                  if (cycle_cnt >= MAX_CYCLE_CANCEL) {
                    return false;
                  }

                  // Roll back search to delta_node
                  if (delta_node != tip) {
                    for (u = tip; u != delta_node; u = pred[u]) {
                      reached[u] = false;
                    }
                    tip = delta_node;
                    a = _next_out[tip] + 1;
                    last_out = _first_out[tip+1];
                    break;
                  }
                }
              }
            }
          }

          // Step back to the previous node
          if (a == last_out) {
            processed[tip] = true;
            stack[++stack_top] = tip;
            tip = pred[tip];
            if (tip < 0) {
              // Finish DFS from the current start node
              break;
            }
            ++_next_out[tip];
          }
        }

      }

      return (cycle_cnt == 0);
    }

    // Global potential update heuristic
    void globalUpdate() {
      const int bucket_end = _root + 1;

      // Initialize buckets
      for (int r = 0; r != _max_rank; ++r) {
        _buckets[r] = bucket_end;
      }
      Value total_excess = 0;
      int b0 = bucket_end;
      for (int i = 0; i != _res_node_num; ++i) {
        if (_excess[i] < 0) {
          _rank[i] = 0;
          _bucket_next[i] = b0;
          _bucket_prev[b0] = i;
          b0 = i;
        } else {
          total_excess += _excess[i];
          _rank[i] = _max_rank;
        }
      }
      if (total_excess == 0) return;
      _buckets[0] = b0;

      // Search the buckets
      int r = 0;
      for ( ; r != _max_rank; ++r) {
        while (_buckets[r] != bucket_end) {
          // Remove the first node from the current bucket
          int u = _buckets[r];
          _buckets[r] = _bucket_next[u];

          // Search the incoming arcs of u
          LargeCost pi_u = _pi[u];
          int last_out = _first_out[u+1];
          for (int a = _first_out[u]; a != last_out; ++a) {
            int ra = _reverse[a];
            if (_res_cap[ra] > 0) {
              int v = _source[ra];
              int old_rank_v = _rank[v];
              if (r < old_rank_v) {
                // Compute the new rank of v
                LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
                int new_rank_v = old_rank_v;
                if (nrc < LargeCost(_max_rank)) {
                  new_rank_v = r + 1 + static_cast<int>(nrc);
                }

                // Change the rank of v
                if (new_rank_v < old_rank_v) {
                  _rank[v] = new_rank_v;
                  _next_out[v] = _first_out[v];

                  // Remove v from its old bucket
                  if (old_rank_v < _max_rank) {
                    if (_buckets[old_rank_v] == v) {
                      _buckets[old_rank_v] = _bucket_next[v];
                    } else {
                      int pv = _bucket_prev[v], nv = _bucket_next[v];
                      _bucket_next[pv] = nv;
                      _bucket_prev[nv] = pv;
                    }
                  }

                  // Insert v into its new bucket
                  int nv = _buckets[new_rank_v];
                  _bucket_next[v] = nv;
                  _bucket_prev[nv] = v;
                  _buckets[new_rank_v] = v;
                }
              }
            }
          }

          // Finish search if there are no more active nodes
          if (_excess[u] > 0) {
            total_excess -= _excess[u];
            if (total_excess <= 0) break;
          }
        }
        if (total_excess <= 0) break;
      }

      // Relabel nodes
      for (int u = 0; u != _res_node_num; ++u) {
        int k = std::min(_rank[u], r);
        if (k > 0) {
          _pi[u] -= _epsilon * k;
          _next_out[u] = _first_out[u];
        }
      }
    }
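    // Illustration of the scaling schedule used by startAugment() and
    // startPush() below (not part of the code): init() multiplies every arc
    // cost by _res_node_num * _alpha and sets _epsilon to the largest scaled
    // cost divided by _alpha. With _alpha = 16 and an initial _epsilon of,
    // say, 4096, the phases run with epsilon = 4096, 256, 16, 1; finishing
    // the epsilon = 1 phase on the scaled costs corresponds to an epsilon
    // below 1/n for the original integer costs, which implies optimality.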

    /// Execute the algorithm performing augment and relabel operations
    void startAugment(int max_length) {
      // Parameters for heuristics
      const int PRICE_REFINEMENT_LIMIT = 2;
      const double GLOBAL_UPDATE_FACTOR = 1.0;
      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
        (_res_node_num + _sup_node_num * _sup_node_num));
      int next_global_update_limit = global_update_skip;

      // Perform cost scaling phases
      IntVector path;
      BoolVector path_arc(_res_arc_num, false);
      int relabel_cnt = 0;
      int eps_phase_cnt = 0;
      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                                        1 : _epsilon / _alpha )
      {
        ++eps_phase_cnt;

        // Price refinement heuristic
        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
          if (priceRefinement()) continue;
        }

        // Initialize current phase
        initPhase();

        // Perform partial augment and relabel operations
        while (true) {
          // Select an active node (FIFO selection)
          while (_active_nodes.size() > 0 &&
                 _excess[_active_nodes.front()] <= 0) {
            _active_nodes.pop_front();
          }
          if (_active_nodes.size() == 0) break;
          int start = _active_nodes.front();

          // Find an augmenting path from the start node
          int tip = start;
          while (int(path.size()) < max_length && _excess[tip] >= 0) {
            int u;
            LargeCost rc, min_red_cost = std::numeric_limits<LargeCost>::max();
            LargeCost pi_tip = _pi[tip];
            int last_out = _first_out[tip+1];
            for (int a = _next_out[tip]; a != last_out; ++a) {
              if (_res_cap[a] > 0) {
                u = _target[a];
                rc = _cost[a] + pi_tip - _pi[u];
                if (rc < 0) {
                  path.push_back(a);
                  _next_out[tip] = a;
                  if (path_arc[a]) {
                    goto augment;   // a cycle is found, stop path search
                  }
                  tip = u;
                  path_arc[a] = true;
                  goto next_step;
                }
                else if (rc < min_red_cost) {
                  min_red_cost = rc;
                }
              }
            }

            // Relabel tip node
            if (tip != start) {
              int ra = _reverse[path.back()];
              min_red_cost =
                std::min(min_red_cost, _cost[ra] + pi_tip - _pi[_target[ra]]);
            }
            last_out = _next_out[tip];
            for (int a = _first_out[tip]; a != last_out; ++a) {
              if (_res_cap[a] > 0) {
                rc = _cost[a] + pi_tip - _pi[_target[a]];
                if (rc < min_red_cost) {
                  min_red_cost = rc;
                }
              }
            }
            _pi[tip] -= min_red_cost + _epsilon;
            _next_out[tip] = _first_out[tip];
            ++relabel_cnt;

            // Step back
            if (tip != start) {
              int pa = path.back();
              path_arc[pa] = false;
              tip = _source[pa];
              path.pop_back();
            }

          next_step: ;
          }

          // Augment along the found path (as much flow as possible)
        augment:
          Value delta;
          int pa, u, v = start;
          for (int i = 0; i != int(path.size()); ++i) {
            pa = path[i];
            u = v;
            v = _target[pa];
            path_arc[pa] = false;
            delta = std::min(_res_cap[pa], _excess[u]);
            _res_cap[pa] -= delta;
            _res_cap[_reverse[pa]] += delta;
            _excess[u] -= delta;
            _excess[v] += delta;
            if (_excess[v] > 0 && _excess[v] <= delta) {
              _active_nodes.push_back(v);
            }
          }
          path.clear();

          // Global update heuristic
          if (relabel_cnt >= next_global_update_limit) {
            globalUpdate();
            next_global_update_limit += global_update_skip;
          }
        }

      }

    }

    /// Execute the algorithm performing push and relabel operations
    void startPush() {
      // Parameters for heuristics
      const int PRICE_REFINEMENT_LIMIT = 2;
      const double GLOBAL_UPDATE_FACTOR = 2.0;

      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
        (_res_node_num + _sup_node_num * _sup_node_num));
      int next_global_update_limit = global_update_skip;

      // Perform cost scaling phases
      BoolVector hyper(_res_node_num, false);
      LargeCostVector hyper_cost(_res_node_num);
      int relabel_cnt = 0;
      int eps_phase_cnt = 0;
      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                                        1 : _epsilon / _alpha )
      {
        ++eps_phase_cnt;

        // Price refinement heuristic
        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
          if (priceRefinement()) continue;
        }

        // Initialize current phase
        initPhase();

        // Perform push and relabel operations
        while (_active_nodes.size() > 0) {
          LargeCost min_red_cost, rc, pi_n;
          Value delta;
          int n, t, a, last_out = _res_arc_num;

        next_node:
          // Select an active node (FIFO selection)
          n = _active_nodes.front();
          last_out = _first_out[n+1];
          pi_n = _pi[n];

          // Perform push operations if there are admissible arcs
          if (_excess[n] > 0) {
            for (a = _next_out[n]; a != last_out; ++a) {
              if (_res_cap[a] > 0 &&
                  _cost[a] + pi_n - _pi[_target[a]] < 0) {
                delta = std::min(_res_cap[a], _excess[n]);
                t = _target[a];

                // Push-look-ahead heuristic
                Value ahead = -_excess[t];
                int last_out_t = _first_out[t+1];
                LargeCost pi_t = _pi[t];
                for (int ta = _next_out[t]; ta != last_out_t; ++ta) {
                  if (_res_cap[ta] > 0 &&
                      _cost[ta] + pi_t - _pi[_target[ta]] < 0)
                    ahead += _res_cap[ta];
                  if (ahead >= delta) break;
                }
                if (ahead < 0) ahead = 0;

                // Push flow along the arc
                if (ahead < delta && !hyper[t]) {
                  _res_cap[a] -= ahead;
                  _res_cap[_reverse[a]] += ahead;
                  _excess[n] -= ahead;
                  _excess[t] += ahead;
                  _active_nodes.push_front(t);
                  hyper[t] = true;
                  hyper_cost[t] = _cost[a] + pi_n - pi_t;
                  _next_out[n] = a;
                  goto next_node;
                } else {
                  _res_cap[a] -= delta;
                  _res_cap[_reverse[a]] += delta;
                  _excess[n] -= delta;
                  _excess[t] += delta;
                  if (_excess[t] > 0 && _excess[t] <= delta)
                    _active_nodes.push_back(t);
                }

                if (_excess[n] == 0) {
                  _next_out[n] = a;
                  goto remove_nodes;
                }
              }
            }
            _next_out[n] = a;
          }

          // Relabel the node if it is still active (or hyper)
          if (_excess[n] > 0 || hyper[n]) {
            min_red_cost = hyper[n] ? -hyper_cost[n] :
              std::numeric_limits<LargeCost>::max();
            for (int a = _first_out[n]; a != last_out; ++a) {
              if (_res_cap[a] > 0) {
                rc = _cost[a] + pi_n - _pi[_target[a]];
                if (rc < min_red_cost) {
                  min_red_cost = rc;
                }
              }
            }
            _pi[n] -= min_red_cost + _epsilon;
            _next_out[n] = _first_out[n];
            hyper[n] = false;
            ++relabel_cnt;
          }

          // Remove nodes that are not active nor hyper
        remove_nodes:
          while ( _active_nodes.size() > 0 &&
                  _excess[_active_nodes.front()] <= 0 &&
                  !hyper[_active_nodes.front()] ) {
            _active_nodes.pop_front();
          }

          // Global update heuristic
          if (relabel_cnt >= next_global_update_limit) {
            globalUpdate();
            for (int u = 0; u != _res_node_num; ++u)
              hyper[u] = false;
            next_global_update_limit += global_update_skip;
          }
        }
      }
    }

  }; //class CostScaling

  ///@}

} //namespace lemon

#endif //LEMON_COST_SCALING_H