| 1 | 1 | /* -*- mode: C++; indent-tabs-mode: nil; -*- | 
| 2 | 2 | * | 
| 3 | 3 | * This file is a part of LEMON, a generic C++ optimization library. | 
| 4 | 4 | * | 
| 5 | 5 | * Copyright (C) 2003-2010 | 
| 6 | 6 | * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport | 
| 7 | 7 | * (Egervary Research Group on Combinatorial Optimization, EGRES). | 
| 8 | 8 | * | 
| 9 | 9 | * Permission to use, modify and distribute this software is granted | 
| 10 | 10 | * provided that this copyright notice appears in all copies. For | 
| 11 | 11 | * precise terms see the accompanying LICENSE file. | 
| 12 | 12 | * | 
| 13 | 13 | * This software is provided "AS IS" with no warranty of any kind, | 
| 14 | 14 | * express or implied, and with no claim as to its suitability for any | 
| 15 | 15 | * purpose. | 
| 16 | 16 | * | 
| 17 | 17 | */ | 
| 18 | 18 |  | 
| 19 | 19 | #ifndef LEMON_COST_SCALING_H | 
| 20 | 20 | #define LEMON_COST_SCALING_H | 
| 21 | 21 |  | 
| 22 | 22 | /// \ingroup min_cost_flow_algs | 
| 23 | 23 | /// \file | 
| 24 | 24 | /// \brief Cost scaling algorithm for finding a minimum cost flow. | 
| 25 | 25 |  | 
| 26 | 26 | #include <vector> | 
| 27 | 27 | #include <deque> | 
| 28 | 28 | #include <limits> | 
| 29 | 29 |  | 
| 30 | 30 | #include <lemon/core.h> | 
| 31 | 31 | #include <lemon/maps.h> | 
| 32 | 32 | #include <lemon/math.h> | 
| 33 | 33 | #include <lemon/static_graph.h> | 
| 34 | 34 | #include <lemon/circulation.h> | 
| 35 | 35 | #include <lemon/bellman_ford.h> | 
| 36 | 36 |  | 
| 37 | 37 | namespace lemon {
 | 
| 38 | 38 |  | 
| 39 | 39 | /// \brief Default traits class of CostScaling algorithm. | 
| 40 | 40 | /// | 
| 41 | 41 | /// Default traits class of CostScaling algorithm. | 
| 42 | 42 | /// \tparam GR Digraph type. | 
| 43 | 43 | /// \tparam V The number type used for flow amounts, capacity bounds | 
| 44 | 44 | /// and supply values. By default it is \c int. | 
| 45 | 45 | /// \tparam C The number type used for costs and potentials. | 
| 46 | 46 | /// By default it is the same as \c V. | 
| 47 | 47 | #ifdef DOXYGEN | 
| 48 | 48 | template <typename GR, typename V = int, typename C = V> | 
| 49 | 49 | #else | 
| 50 | 50 | template < typename GR, typename V = int, typename C = V, | 
| 51 | 51 | bool integer = std::numeric_limits<C>::is_integer > | 
| 52 | 52 | #endif | 
| 53 | 53 | struct CostScalingDefaultTraits | 
| 54 | 54 |   {
 | 
| 55 | 55 | /// The type of the digraph | 
| 56 | 56 | typedef GR Digraph; | 
| 57 | 57 | /// The type of the flow amounts, capacity bounds and supply values | 
| 58 | 58 | typedef V Value; | 
| 59 | 59 | /// The type of the arc costs | 
| 60 | 60 | typedef C Cost; | 
| 61 | 61 |  | 
| 62 | 62 | /// \brief The large cost type used for internal computations | 
| 63 | 63 | /// | 
| 64 | 64 | /// The large cost type used for internal computations. | 
| 65 | 65 | /// It is \c long \c long if the \c Cost type is integer, | 
| 66 | 66 | /// otherwise it is \c double. | 
| 67 | 67 | /// \c Cost must be convertible to \c LargeCost. | 
| 68 | 68 | typedef double LargeCost; | 
| 69 | 69 | }; | 
| 70 | 70 |  | 
| 71 | 71 | // Default traits class for integer cost types | 
| 72 | 72 | template <typename GR, typename V, typename C> | 
| 73 | 73 | struct CostScalingDefaultTraits<GR, V, C, true> | 
| 74 | 74 |   {
 | 
| 75 | 75 | typedef GR Digraph; | 
| 76 | 76 | typedef V Value; | 
| 77 | 77 | typedef C Cost; | 
| 78 | 78 | #ifdef LEMON_HAVE_LONG_LONG | 
| 79 | 79 | typedef long long LargeCost; | 
| 80 | 80 | #else | 
| 81 | 81 | typedef long LargeCost; | 
| 82 | 82 | #endif | 
| 83 | 83 | }; | 
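  // Illustrative sketch (editorial, assumes C++11 <type_traits> is available):
  // the LargeCost type selected by the traits class can be inspected at
  // compile time, e.g.
  //
  //   static_assert(std::is_same<
  //       CostScalingDefaultTraits<ListDigraph, int, double>::LargeCost,
  //       double>::value, "floating-point costs fall back to double");
  //
  // while integer cost types select long long (or long, depending on
  // whether LEMON_HAVE_LONG_LONG is defined).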
| 84 | 84 |  | 
| 85 | 85 |  | 
| 86 | 86 | /// \addtogroup min_cost_flow_algs | 
| 87 | 87 |   /// @{
 | 
| 88 | 88 |  | 
| 89 | 89 | /// \brief Implementation of the Cost Scaling algorithm for | 
| 90 | 90 | /// finding a \ref min_cost_flow "minimum cost flow". | 
| 91 | 91 | /// | 
| 92 | 92 | /// \ref CostScaling implements a cost scaling algorithm that performs | 
| 93 | 93 | /// push/augment and relabel operations for finding a \ref min_cost_flow | 
| 94 | 94 | /// "minimum cost flow" \ref amo93networkflows, \ref goldberg90approximation, | 
| 95 | 95 | /// \ref goldberg97efficient, \ref bunnagel98efficient. | 
| 96 | 96 | /// It is a highly efficient primal-dual solution method, which | 
| 97 | 97 | /// can be viewed as the generalization of the \ref Preflow | 
| 98 | 98 | /// "preflow push-relabel" algorithm for the maximum flow problem. | 
| 99 | 99 | /// | 
| 100 | 100 | /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest | 
| 101 | 101 | /// implementations available in LEMON for this problem. | 
| 102 | 102 | /// | 
| 103 | 103 | /// Most of the parameters of the problem (except for the digraph) | 
| 104 | 104 | /// can be given using separate functions, and the algorithm can be | 
| 105 | 105 | /// executed using the \ref run() function. If some parameters are not | 
| 106 | 106 | /// specified, then default values will be used. | 
| 107 | 107 | /// | 
| 108 | 108 | /// \tparam GR The digraph type the algorithm runs on. | 
| 109 | 109 | /// \tparam V The number type used for flow amounts, capacity bounds | 
| 110 | 110 | /// and supply values in the algorithm. By default, it is \c int. | 
| 111 | 111 | /// \tparam C The number type used for costs and potentials in the | 
| 112 | 112 | /// algorithm. By default, it is the same as \c V. | 
| 113 | 113 | /// \tparam TR The traits class that defines various types used by the | 
| 114 | 114 | /// algorithm. By default, it is \ref CostScalingDefaultTraits | 
| 115 | 115 | /// "CostScalingDefaultTraits<GR, V, C>". | 
| 116 | 116 | /// In most cases, this parameter should not be set directly; | 
| 117 | 117 | /// consider using the named template parameters instead. | 
| 118 | 118 | /// | 
| 119 | 119 | /// \warning Both \c V and \c C must be signed number types. | 
| 120 | 120 | /// \warning All input data (capacities, supply values, and costs) must | 
| 121 | 121 | /// be integer. | 
| 122 | 122 | /// \warning This algorithm does not support negative costs for | 
| 123 | 123 | /// arcs having infinite upper bound. | 
| 124 | 124 | /// | 
| 125 | 125 | /// \note %CostScaling provides three different internal methods, | 
| 126 | 126 | /// from which the most efficient one is used by default. | 
| 127 | 127 | /// For more information, see \ref Method. | 
| 128 | 128 | #ifdef DOXYGEN | 
| 129 | 129 | template <typename GR, typename V, typename C, typename TR> | 
| 130 | 130 | #else | 
| 131 | 131 | template < typename GR, typename V = int, typename C = V, | 
| 132 | 132 | typename TR = CostScalingDefaultTraits<GR, V, C> > | 
| 133 | 133 | #endif | 
| 134 | 134 | class CostScaling | 
| 135 | 135 |   {
 | 
| 136 | 136 | public: | 
| 137 | 137 |  | 
| 138 | 138 | /// The type of the digraph | 
| 139 | 139 | typedef typename TR::Digraph Digraph; | 
| 140 | 140 | /// The type of the flow amounts, capacity bounds and supply values | 
| 141 | 141 | typedef typename TR::Value Value; | 
| 142 | 142 | /// The type of the arc costs | 
| 143 | 143 | typedef typename TR::Cost Cost; | 
| 144 | 144 |  | 
| 145 | 145 | /// \brief The large cost type | 
| 146 | 146 | /// | 
| 147 | 147 | /// The large cost type used for internal computations. | 
| 148 | 148 | /// By default, it is \c long \c long if the \c Cost type is integer, | 
| 149 | 149 | /// otherwise it is \c double. | 
| 150 | 150 | typedef typename TR::LargeCost LargeCost; | 
| 151 | 151 |  | 
| 152 | 152 | /// The \ref CostScalingDefaultTraits "traits class" of the algorithm | 
| 153 | 153 | typedef TR Traits; | 
| 154 | 154 |  | 
| 155 | 155 | public: | 
| 156 | 156 |  | 
| 157 | 157 | /// \brief Problem type constants for the \c run() function. | 
| 158 | 158 | /// | 
| 159 | 159 | /// Enum type containing the problem type constants that can be | 
| 160 | 160 | /// returned by the \ref run() function of the algorithm. | 
| 161 | 161 |     enum ProblemType {
 | 
| 162 | 162 | /// The problem has no feasible solution (flow). | 
| 163 | 163 | INFEASIBLE, | 
| 164 | 164 | /// The problem has an optimal solution (i.e. it is feasible and | 
| 165 | 165 | /// bounded), and the algorithm has found optimal flow and node | 
| 166 | 166 | /// potentials (primal and dual solutions). | 
| 167 | 167 | OPTIMAL, | 
| 168 | 168 | /// The digraph contains an arc of negative cost and infinite | 
| 169 | 169 | /// upper bound. It means that the objective function is unbounded | 
| 170 | 170 | /// on that arc, however, note that it could actually be bounded | 
| 171 | 171 | /// over the feasible flows, but this algroithm cannot handle | 
| 172 | 172 | /// these cases. | 
| 173 | 173 | UNBOUNDED | 
| 174 | 174 | }; | 
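    // Illustrative sketch (editorial; assumes a ListDigraph g and the
    // problem maps cap, cost, sup are set up elsewhere): the value returned
    // by run() should be checked before querying the results.
    //
    //   CostScaling<ListDigraph> cs(g);
    //   cs.upperMap(cap).costMap(cost).supplyMap(sup);
    //   if (cs.run() != CostScaling<ListDigraph>::OPTIMAL) {
    //     // INFEASIBLE or UNBOUNDED: the primal/dual query functions
    //     // are not meaningful in this case
    //   }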
| 175 | 175 |  | 
| 176 | 176 | /// \brief Constants for selecting the internal method. | 
| 177 | 177 | /// | 
| 178 | 178 | /// Enum type containing constants for selecting the internal method | 
| 179 | 179 | /// for the \ref run() function. | 
| 180 | 180 | /// | 
| 181 | 181 | /// \ref CostScaling provides three internal methods that differ mainly | 
| 182 | 182 | /// in their base operations, which are used in conjunction with the | 
| 183 | 183 | /// relabel operation. | 
| 184 | 184 | /// By default, the so called \ref PARTIAL_AUGMENT | 
| 185 | 185 | /// "Partial Augment-Relabel" method is used, which turned out to be | 
| 186 | 186 | /// the most efficient and the most robust on various test inputs. | 
| 187 | 187 | /// However, the other methods can be selected using the \ref run() | 
| 188 | 188 | /// function with the proper parameter. | 
| 189 | 189 |     enum Method {
 | 
| 190 | 190 | /// Local push operations are used, i.e. flow is moved only on one | 
| 191 | 191 | /// admissible arc at once. | 
| 192 | 192 | PUSH, | 
| 193 | 193 | /// Augment operations are used, i.e. flow is moved on admissible | 
| 194 | 194 | /// paths from a node with excess to a node with deficit. | 
| 195 | 195 | AUGMENT, | 
| 196 | 196 | /// Partial augment operations are used, i.e. flow is moved on | 
| 197 | 197 | /// admissible paths started from a node with excess, but the | 
| 198 | 198 | /// lengths of these paths are limited. This method can be viewed | 
| 199 | 199 | /// as a combined version of the previous two operations. | 
| 200 | 200 | PARTIAL_AUGMENT | 
| 201 | 201 | }; | 
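    // Illustrative sketch (editorial): a non-default internal method (and,
    // optionally, a different scaling factor) can be passed directly to
    // run(); cs is assumed to be a parameterized CostScaling instance as above.
    //
    //   cs.run(CostScaling<ListDigraph>::AUGMENT);      // full augmenting paths
    //   cs.run(CostScaling<ListDigraph>::PUSH, 16);     // push-relabel, factor 16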
| 202 | 202 |  | 
| 203 | 203 | private: | 
| 204 | 204 |  | 
| 205 | 205 | TEMPLATE_DIGRAPH_TYPEDEFS(GR); | 
| 206 | 206 |  | 
| 207 | 207 | typedef std::vector<int> IntVector; | 
| 208 | 208 | typedef std::vector<Value> ValueVector; | 
| 209 | 209 | typedef std::vector<Cost> CostVector; | 
| 210 | 210 | typedef std::vector<LargeCost> LargeCostVector; | 
| 211 | 211 | typedef std::vector<char> BoolVector; | 
| 212 | 212 | // Note: vector<char> is used instead of vector<bool> for efficiency reasons | 
| 213 | 213 |  | 
| 214 | 214 | private: | 
| 215 | 215 |  | 
| 216 | 216 | template <typename KT, typename VT> | 
| 217 | 217 |     class StaticVectorMap {
 | 
| 218 | 218 | public: | 
| 219 | 219 | typedef KT Key; | 
| 220 | 220 | typedef VT Value; | 
| 221 | 221 |  | 
| 222 | 222 |       StaticVectorMap(std::vector<Value>& v) : _v(v) {}
 | 
| 223 | 223 |  | 
| 224 | 224 |       const Value& operator[](const Key& key) const {
 | 
| 225 | 225 | return _v[StaticDigraph::id(key)]; | 
| 226 | 226 | } | 
| 227 | 227 |  | 
| 228 | 228 |       Value& operator[](const Key& key) {
 | 
| 229 | 229 | return _v[StaticDigraph::id(key)]; | 
| 230 | 230 | } | 
| 231 | 231 |  | 
| 232 | 232 |       void set(const Key& key, const Value& val) {
 | 
| 233 | 233 | _v[StaticDigraph::id(key)] = val; | 
| 234 | 234 | } | 
| 235 | 235 |  | 
| 236 | 236 | private: | 
| 237 | 237 | std::vector<Value>& _v; | 
| 238 | 238 | }; | 
| 239 | 239 |  | 
| 240 | typedef StaticVectorMap<StaticDigraph::Node, LargeCost> LargeCostNodeMap; | |
| 241 | 240 | typedef StaticVectorMap<StaticDigraph::Arc, LargeCost> LargeCostArcMap; | 
| 242 | 241 |  | 
| 243 | 242 | private: | 
| 244 | 243 |  | 
| 245 | 244 | // Data related to the underlying digraph | 
| 246 | 245 | const GR &_graph; | 
| 247 | 246 | int _node_num; | 
| 248 | 247 | int _arc_num; | 
| 249 | 248 | int _res_node_num; | 
| 250 | 249 | int _res_arc_num; | 
| 251 | 250 | int _root; | 
| 252 | 251 |  | 
| 253 | 252 | // Parameters of the problem | 
| 254 | 253 | bool _have_lower; | 
| 255 | 254 | Value _sum_supply; | 
| 256 | 255 | int _sup_node_num; | 
| 257 | 256 |  | 
| 258 | 257 | // Data structures for storing the digraph | 
| 259 | 258 | IntNodeMap _node_id; | 
| 260 | 259 | IntArcMap _arc_idf; | 
| 261 | 260 | IntArcMap _arc_idb; | 
| 262 | 261 | IntVector _first_out; | 
| 263 | 262 | BoolVector _forward; | 
| 264 | 263 | IntVector _source; | 
| 265 | 264 | IntVector _target; | 
| 266 | 265 | IntVector _reverse; | 
| 267 | 266 |  | 
| 268 | 267 | // Node and arc data | 
| 269 | 268 | ValueVector _lower; | 
| 270 | 269 | ValueVector _upper; | 
| 271 | 270 | CostVector _scost; | 
| 272 | 271 | ValueVector _supply; | 
| 273 | 272 |  | 
| 274 | 273 | ValueVector _res_cap; | 
| 275 | 274 | LargeCostVector _cost; | 
| 276 | 275 | LargeCostVector _pi; | 
| 277 | 276 | ValueVector _excess; | 
| 278 | 277 | IntVector _next_out; | 
| 279 | 278 | std::deque<int> _active_nodes; | 
| 280 | 279 |  | 
| 281 | 280 | // Data for scaling | 
| 282 | 281 | LargeCost _epsilon; | 
| 283 | 282 | int _alpha; | 
| 284 | 283 |  | 
| 285 | 284 | IntVector _buckets; | 
| 286 | 285 | IntVector _bucket_next; | 
| 287 | 286 | IntVector _bucket_prev; | 
| 288 | 287 | IntVector _rank; | 
| 289 | 288 | int _max_rank; | 
| 290 | 289 |  | 
| 291 | // Data for a StaticDigraph structure | |
| 292 | typedef std::pair<int, int> IntPair; | |
| 293 | StaticDigraph _sgr; | |
| 294 | std::vector<IntPair> _arc_vec; | |
| 295 | std::vector<LargeCost> _cost_vec; | |
| 296 | LargeCostArcMap _cost_map; | |
| 297 | LargeCostNodeMap _pi_map; | |
| 298 |  | |
| 299 | 290 | public: | 
| 300 | 291 |  | 
| 301 | 292 | /// \brief Constant for infinite upper bounds (capacities). | 
| 302 | 293 | /// | 
| 303 | 294 | /// Constant for infinite upper bounds (capacities). | 
| 304 | 295 | /// It is \c std::numeric_limits<Value>::infinity() if available, | 
| 305 | 296 | /// \c std::numeric_limits<Value>::max() otherwise. | 
| 306 | 297 | const Value INF; | 
| 307 | 298 |  | 
| 308 | 299 | public: | 
| 309 | 300 |  | 
| 310 | 301 | /// \name Named Template Parameters | 
| 311 | 302 |     /// @{
 | 
| 312 | 303 |  | 
| 313 | 304 | template <typename T> | 
| 314 | 305 |     struct SetLargeCostTraits : public Traits {
 | 
| 315 | 306 | typedef T LargeCost; | 
| 316 | 307 | }; | 
| 317 | 308 |  | 
| 318 | 309 | /// \brief \ref named-templ-param "Named parameter" for setting | 
| 319 | 310 | /// \c LargeCost type. | 
| 320 | 311 | /// | 
| 321 | 312 | /// \ref named-templ-param "Named parameter" for setting \c LargeCost | 
| 322 | 313 | /// type, which is used for internal computations in the algorithm. | 
| 323 | 314 | /// \c Cost must be convertible to \c LargeCost. | 
| 324 | 315 | template <typename T> | 
| 325 | 316 | struct SetLargeCost | 
| 326 | 317 |       : public CostScaling<GR, V, C, SetLargeCostTraits<T> > {
 | 
| 327 | 318 | typedef CostScaling<GR, V, C, SetLargeCostTraits<T> > Create; | 
| 328 | 319 | }; | 
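    // Illustrative sketch (editorial): following the usual LEMON
    // named-parameter idiom, the nested Create type is instantiated, e.g.
    // to force double-precision internal computations (g is an assumed digraph):
    //
    //   CostScaling<ListDigraph>::SetLargeCost<double>::Create cs(g);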
| 329 | 320 |  | 
| 330 | 321 | /// @} | 
| 331 | 322 |  | 
| 332 | 323 | protected: | 
| 333 | 324 |  | 
| 334 | 325 |     CostScaling() {}
 | 
| 335 | 326 |  | 
| 336 | 327 | public: | 
| 337 | 328 |  | 
| 338 | 329 | /// \brief Constructor. | 
| 339 | 330 | /// | 
| 340 | 331 | /// The constructor of the class. | 
| 341 | 332 | /// | 
| 342 | 333 | /// \param graph The digraph the algorithm runs on. | 
| 343 | 334 | CostScaling(const GR& graph) : | 
| 344 | 335 | _graph(graph), _node_id(graph), _arc_idf(graph), _arc_idb(graph), | 
| 345 | _cost_map(_cost_vec), _pi_map(_pi), | |
| 346 | 336 | INF(std::numeric_limits<Value>::has_infinity ? | 
| 347 | 337 | std::numeric_limits<Value>::infinity() : | 
| 348 | 338 | std::numeric_limits<Value>::max()) | 
| 349 | 339 |     {
 | 
| 350 | 340 | // Check the number types | 
| 351 | 341 | LEMON_ASSERT(std::numeric_limits<Value>::is_signed, | 
| 352 | 342 | "The flow type of CostScaling must be signed"); | 
| 353 | 343 | LEMON_ASSERT(std::numeric_limits<Cost>::is_signed, | 
| 354 | 344 | "The cost type of CostScaling must be signed"); | 
| 355 | 345 |  | 
| 356 | 346 | // Reset data structures | 
| 357 | 347 | reset(); | 
| 358 | 348 | } | 
| 359 | 349 |  | 
| 360 | 350 | /// \name Parameters | 
| 361 | 351 | /// The parameters of the algorithm can be specified using these | 
| 362 | 352 | /// functions. | 
| 363 | 353 |  | 
| 364 | 354 |     /// @{
 | 
| 365 | 355 |  | 
| 366 | 356 | /// \brief Set the lower bounds on the arcs. | 
| 367 | 357 | /// | 
| 368 | 358 | /// This function sets the lower bounds on the arcs. | 
| 369 | 359 | /// If it is not used before calling \ref run(), the lower bounds | 
| 370 | 360 | /// will be set to zero on all arcs. | 
| 371 | 361 | /// | 
| 372 | 362 | /// \param map An arc map storing the lower bounds. | 
| 373 | 363 | /// Its \c Value type must be convertible to the \c Value type | 
| 374 | 364 | /// of the algorithm. | 
| 375 | 365 | /// | 
| 376 | 366 | /// \return <tt>(*this)</tt> | 
| 377 | 367 | template <typename LowerMap> | 
| 378 | 368 |     CostScaling& lowerMap(const LowerMap& map) {
 | 
| 379 | 369 | _have_lower = true; | 
| 380 | 370 |       for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 381 | 371 | _lower[_arc_idf[a]] = map[a]; | 
| 382 | 372 | _lower[_arc_idb[a]] = map[a]; | 
| 383 | 373 | } | 
| 384 | 374 | return *this; | 
| 385 | 375 | } | 
| 386 | 376 |  | 
| 387 | 377 | /// \brief Set the upper bounds (capacities) on the arcs. | 
| 388 | 378 | /// | 
| 389 | 379 | /// This function sets the upper bounds (capacities) on the arcs. | 
| 390 | 380 | /// If it is not used before calling \ref run(), the upper bounds | 
| 391 | 381 | /// will be set to \ref INF on all arcs (i.e. the flow value will be | 
| 392 | 382 | /// unbounded from above). | 
| 393 | 383 | /// | 
| 394 | 384 | /// \param map An arc map storing the upper bounds. | 
| 395 | 385 | /// Its \c Value type must be convertible to the \c Value type | 
| 396 | 386 | /// of the algorithm. | 
| 397 | 387 | /// | 
| 398 | 388 | /// \return <tt>(*this)</tt> | 
| 399 | 389 | template<typename UpperMap> | 
| 400 | 390 |     CostScaling& upperMap(const UpperMap& map) {
 | 
| 401 | 391 |       for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 402 | 392 | _upper[_arc_idf[a]] = map[a]; | 
| 403 | 393 | } | 
| 404 | 394 | return *this; | 
| 405 | 395 | } | 
| 406 | 396 |  | 
| 407 | 397 | /// \brief Set the costs of the arcs. | 
| 408 | 398 | /// | 
| 409 | 399 | /// This function sets the costs of the arcs. | 
| 410 | 400 | /// If it is not used before calling \ref run(), the costs | 
| 411 | 401 | /// will be set to \c 1 on all arcs. | 
| 412 | 402 | /// | 
| 413 | 403 | /// \param map An arc map storing the costs. | 
| 414 | 404 | /// Its \c Value type must be convertible to the \c Cost type | 
| 415 | 405 | /// of the algorithm. | 
| 416 | 406 | /// | 
| 417 | 407 | /// \return <tt>(*this)</tt> | 
| 418 | 408 | template<typename CostMap> | 
| 419 | 409 |     CostScaling& costMap(const CostMap& map) {
 | 
| 420 | 410 |       for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 421 | 411 | _scost[_arc_idf[a]] = map[a]; | 
| 422 | 412 | _scost[_arc_idb[a]] = -map[a]; | 
| 423 | 413 | } | 
| 424 | 414 | return *this; | 
| 425 | 415 | } | 
| 426 | 416 |  | 
| 427 | 417 | /// \brief Set the supply values of the nodes. | 
| 428 | 418 | /// | 
| 429 | 419 | /// This function sets the supply values of the nodes. | 
| 430 | 420 | /// If neither this function nor \ref stSupply() is used before | 
| 431 | 421 | /// calling \ref run(), the supply of each node will be set to zero. | 
| 432 | 422 | /// | 
| 433 | 423 | /// \param map A node map storing the supply values. | 
| 434 | 424 | /// Its \c Value type must be convertible to the \c Value type | 
| 435 | 425 | /// of the algorithm. | 
| 436 | 426 | /// | 
| 437 | 427 | /// \return <tt>(*this)</tt> | 
| 438 | 428 | template<typename SupplyMap> | 
| 439 | 429 |     CostScaling& supplyMap(const SupplyMap& map) {
 | 
| 440 | 430 |       for (NodeIt n(_graph); n != INVALID; ++n) {
 | 
| 441 | 431 | _supply[_node_id[n]] = map[n]; | 
| 442 | 432 | } | 
| 443 | 433 | return *this; | 
| 444 | 434 | } | 
| 445 | 435 |  | 
| 446 | 436 | /// \brief Set single source and target nodes and a supply value. | 
| 447 | 437 | /// | 
| 448 | 438 | /// This function sets a single source node and a single target node | 
| 449 | 439 | /// and the required flow value. | 
| 450 | 440 | /// If neither this function nor \ref supplyMap() is used before | 
| 451 | 441 | /// calling \ref run(), the supply of each node will be set to zero. | 
| 452 | 442 | /// | 
| 453 | 443 | /// Using this function has the same effect as using \ref supplyMap() | 
| 454 | 444 | /// with a map in which \c k is assigned to \c s, \c -k is | 
| 455 | 445 | /// assigned to \c t and all other nodes have zero supply value. | 
| 456 | 446 | /// | 
| 457 | 447 | /// \param s The source node. | 
| 458 | 448 | /// \param t The target node. | 
| 459 | 449 | /// \param k The required amount of flow from node \c s to node \c t | 
| 460 | 450 | /// (i.e. the supply of \c s and the demand of \c t). | 
| 461 | 451 | /// | 
| 462 | 452 | /// \return <tt>(*this)</tt> | 
| 463 | 453 |     CostScaling& stSupply(const Node& s, const Node& t, Value k) {
 | 
| 464 | 454 |       for (int i = 0; i != _res_node_num; ++i) {
 | 
| 465 | 455 | _supply[i] = 0; | 
| 466 | 456 | } | 
| 467 | 457 | _supply[_node_id[s]] = k; | 
| 468 | 458 | _supply[_node_id[t]] = -k; | 
| 469 | 459 | return *this; | 
| 470 | 460 | } | 
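    // Illustrative sketch (editorial; cap and cost are assumed arc maps,
    // s and t assumed nodes): sending 80 units of flow from s to t at
    // minimum cost.
    //
    //   cs.upperMap(cap).costMap(cost).stSupply(s, t, 80).run();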
| 471 | 461 |  | 
| 472 | 462 | /// @} | 
| 473 | 463 |  | 
| 474 | 464 | /// \name Execution control | 
| 475 | 465 | /// The algorithm can be executed using \ref run(). | 
| 476 | 466 |  | 
| 477 | 467 |     /// @{
 | 
| 478 | 468 |  | 
| 479 | 469 | /// \brief Run the algorithm. | 
| 480 | 470 | /// | 
| 481 | 471 | /// This function runs the algorithm. | 
| 482 | 472 | /// The parameters can be specified using functions \ref lowerMap(), | 
| 483 | 473 | /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(). | 
| 484 | 474 | /// For example, | 
| 485 | 475 | /// \code | 
| 486 | 476 | /// CostScaling<ListDigraph> cs(graph); | 
| 487 | 477 | /// cs.lowerMap(lower).upperMap(upper).costMap(cost) | 
| 488 | 478 | /// .supplyMap(sup).run(); | 
| 489 | 479 | /// \endcode | 
| 490 | 480 | /// | 
| 491 | 481 | /// This function can be called more than once. All the given parameters | 
| 492 | 482 | /// are kept for the next call, unless \ref resetParams() or \ref reset() | 
| 493 | 483 | /// is used, thus only the modified parameters have to be set again. | 
| 494 | 484 | /// If the underlying digraph was also modified after the construction | 
| 495 | 485 | /// of the class (or the last \ref reset() call), then the \ref reset() | 
| 496 | 486 | /// function must be called. | 
| 497 | 487 | /// | 
| 498 | 488 | /// \param method The internal method that will be used in the | 
| 499 | 489 | /// algorithm. For more information, see \ref Method. | 
| 500 | 490 | /// \param factor The cost scaling factor. It must be larger than one. | 
| 501 | 491 | /// | 
| 502 | 492 | /// \return \c INFEASIBLE if no feasible flow exists, | 
| 503 | 493 | /// \n \c OPTIMAL if the problem has optimal solution | 
| 504 | 494 | /// (i.e. it is feasible and bounded), and the algorithm has found | 
| 505 | 495 | /// optimal flow and node potentials (primal and dual solutions), | 
| 506 | 496 | /// \n \c UNBOUNDED if the digraph contains an arc of negative cost | 
| 507 | 497 | /// and infinite upper bound. It means that the objective function | 
| 508 | 498 | /// is unbounded on that arc; note, however, that it could still be | 
| 509 | 499 | /// bounded over the feasible flows, but this algorithm cannot handle | 
| 510 | 500 | /// these cases. | 
| 511 | 501 | /// | 
| 512 | 502 | /// \see ProblemType, Method | 
| 513 | 503 | /// \see resetParams(), reset() | 
| 514 | 504 |     ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 8) {
 | 
| 515 | 505 | _alpha = factor; | 
| 516 | 506 | ProblemType pt = init(); | 
| 517 | 507 | if (pt != OPTIMAL) return pt; | 
| 518 | 508 | start(method); | 
| 519 | 509 | return OPTIMAL; | 
| 520 | 510 | } | 
| 521 | 511 |  | 
| 522 | 512 | /// \brief Reset all the parameters that have been given before. | 
| 523 | 513 | /// | 
| 524 | 514 | /// This function resets all the parameters that have been given | 
| 525 | 515 | /// before using functions \ref lowerMap(), \ref upperMap(), | 
| 526 | 516 | /// \ref costMap(), \ref supplyMap(), \ref stSupply(). | 
| 527 | 517 | /// | 
| 528 | 518 | /// It is useful for multiple \ref run() calls. Basically, all the given | 
| 529 | 519 | /// parameters are kept for the next \ref run() call, unless | 
| 530 | 520 | /// \ref resetParams() or \ref reset() is used. | 
| 531 | 521 | /// If the underlying digraph was also modified after the construction | 
| 532 | 522 | /// of the class or the last \ref reset() call, then the \ref reset() | 
| 533 | 523 | /// function must be used, otherwise \ref resetParams() is sufficient. | 
| 534 | 524 | /// | 
| 535 | 525 | /// For example, | 
| 536 | 526 | /// \code | 
| 537 | 527 | /// CostScaling<ListDigraph> cs(graph); | 
| 538 | 528 | /// | 
| 539 | 529 | /// // First run | 
| 540 | 530 | /// cs.lowerMap(lower).upperMap(upper).costMap(cost) | 
| 541 | 531 | /// .supplyMap(sup).run(); | 
| 542 | 532 | /// | 
| 543 | 533 | /// // Run again with modified cost map (resetParams() is not called, | 
| 544 | 534 | /// // so only the cost map has to be set again) | 
| 545 | 535 | /// cost[e] += 100; | 
| 546 | 536 | /// cs.costMap(cost).run(); | 
| 547 | 537 | /// | 
| 548 | 538 | /// // Run again from scratch using resetParams() | 
| 549 | 539 | /// // (the lower bounds will be set to zero on all arcs) | 
| 550 | 540 | /// cs.resetParams(); | 
| 551 | 541 | /// cs.upperMap(capacity).costMap(cost) | 
| 552 | 542 | /// .supplyMap(sup).run(); | 
| 553 | 543 | /// \endcode | 
| 554 | 544 | /// | 
| 555 | 545 | /// \return <tt>(*this)</tt> | 
| 556 | 546 | /// | 
| 557 | 547 | /// \see reset(), run() | 
| 558 | 548 |     CostScaling& resetParams() {
 | 
| 559 | 549 |       for (int i = 0; i != _res_node_num; ++i) {
 | 
| 560 | 550 | _supply[i] = 0; | 
| 561 | 551 | } | 
| 562 | 552 | int limit = _first_out[_root]; | 
| 563 | 553 |       for (int j = 0; j != limit; ++j) {
 | 
| 564 | 554 | _lower[j] = 0; | 
| 565 | 555 | _upper[j] = INF; | 
| 566 | 556 | _scost[j] = _forward[j] ? 1 : -1; | 
| 567 | 557 | } | 
| 568 | 558 |       for (int j = limit; j != _res_arc_num; ++j) {
 | 
| 569 | 559 | _lower[j] = 0; | 
| 570 | 560 | _upper[j] = INF; | 
| 571 | 561 | _scost[j] = 0; | 
| 572 | 562 | _scost[_reverse[j]] = 0; | 
| 573 | 563 | } | 
| 574 | 564 | _have_lower = false; | 
| 575 | 565 | return *this; | 
| 576 | 566 | } | 
| 577 | 567 |  | 
| 578 | 568 | /// \brief Reset the internal data structures and all the parameters | 
| 579 | 569 | /// that have been given before. | 
| 580 | 570 | /// | 
| 581 | 571 | /// This function resets the internal data structures and all the | 
| 582 | 572 | /// parameters that have been given before using functions \ref lowerMap(), | 
| 583 | 573 | /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(). | 
| 584 | 574 | /// | 
| 585 | 575 | /// It is useful for multiple \ref run() calls. By default, all the given | 
| 586 | 576 | /// parameters are kept for the next \ref run() call, unless | 
| 587 | 577 | /// \ref resetParams() or \ref reset() is used. | 
| 588 | 578 | /// If the underlying digraph was also modified after the construction | 
| 589 | 579 | /// of the class or the last \ref reset() call, then the \ref reset() | 
| 590 | 580 | /// function must be used, otherwise \ref resetParams() is sufficient. | 
| 591 | 581 | /// | 
| 592 | 582 | /// See \ref resetParams() for examples. | 
| 593 | 583 | /// | 
| 594 | 584 | /// \return <tt>(*this)</tt> | 
| 595 | 585 | /// | 
| 596 | 586 | /// \see resetParams(), run() | 
| 597 | 587 |     CostScaling& reset() {
 | 
| 598 | 588 | // Resize vectors | 
| 599 | 589 | _node_num = countNodes(_graph); | 
| 600 | 590 | _arc_num = countArcs(_graph); | 
| 601 | 591 | _res_node_num = _node_num + 1; | 
| 602 | 592 | _res_arc_num = 2 * (_arc_num + _node_num); | 
| 603 | 593 | _root = _node_num; | 
| 604 | 594 |  | 
| 605 | 595 | _first_out.resize(_res_node_num + 1); | 
| 606 | 596 | _forward.resize(_res_arc_num); | 
| 607 | 597 | _source.resize(_res_arc_num); | 
| 608 | 598 | _target.resize(_res_arc_num); | 
| 609 | 599 | _reverse.resize(_res_arc_num); | 
| 610 | 600 |  | 
| 611 | 601 | _lower.resize(_res_arc_num); | 
| 612 | 602 | _upper.resize(_res_arc_num); | 
| 613 | 603 | _scost.resize(_res_arc_num); | 
| 614 | 604 | _supply.resize(_res_node_num); | 
| 615 | 605 |  | 
| 616 | 606 | _res_cap.resize(_res_arc_num); | 
| 617 | 607 | _cost.resize(_res_arc_num); | 
| 618 | 608 | _pi.resize(_res_node_num); | 
| 619 | 609 | _excess.resize(_res_node_num); | 
| 620 | 610 | _next_out.resize(_res_node_num); | 
| 621 | 611 |  | 
| 622 | _arc_vec.reserve(_res_arc_num); | |
| 623 | _cost_vec.reserve(_res_arc_num); | |
| 624 |  | |
| 625 | 612 | // Copy the graph | 
| 626 | 613 | int i = 0, j = 0, k = 2 * _arc_num + _node_num; | 
| 627 | 614 |       for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
 | 
| 628 | 615 | _node_id[n] = i; | 
| 629 | 616 | } | 
| 630 | 617 | i = 0; | 
| 631 | 618 |       for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
 | 
| 632 | 619 | _first_out[i] = j; | 
| 633 | 620 |         for (OutArcIt a(_graph, n); a != INVALID; ++a, ++j) {
 | 
| 634 | 621 | _arc_idf[a] = j; | 
| 635 | 622 | _forward[j] = true; | 
| 636 | 623 | _source[j] = i; | 
| 637 | 624 | _target[j] = _node_id[_graph.runningNode(a)]; | 
| 638 | 625 | } | 
| 639 | 626 |         for (InArcIt a(_graph, n); a != INVALID; ++a, ++j) {
 | 
| 640 | 627 | _arc_idb[a] = j; | 
| 641 | 628 | _forward[j] = false; | 
| 642 | 629 | _source[j] = i; | 
| 643 | 630 | _target[j] = _node_id[_graph.runningNode(a)]; | 
| 644 | 631 | } | 
| 645 | 632 | _forward[j] = false; | 
| 646 | 633 | _source[j] = i; | 
| 647 | 634 | _target[j] = _root; | 
| 648 | 635 | _reverse[j] = k; | 
| 649 | 636 | _forward[k] = true; | 
| 650 | 637 | _source[k] = _root; | 
| 651 | 638 | _target[k] = i; | 
| 652 | 639 | _reverse[k] = j; | 
| 653 | 640 | ++j; ++k; | 
| 654 | 641 | } | 
| 655 | 642 | _first_out[i] = j; | 
| 656 | 643 | _first_out[_res_node_num] = k; | 
| 657 | 644 |       for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 658 | 645 | int fi = _arc_idf[a]; | 
| 659 | 646 | int bi = _arc_idb[a]; | 
| 660 | 647 | _reverse[fi] = bi; | 
| 661 | 648 | _reverse[bi] = fi; | 
| 662 | 649 | } | 
| 663 | 650 |  | 
| 664 | 651 | // Reset parameters | 
| 665 | 652 | resetParams(); | 
| 666 | 653 | return *this; | 
| 667 | 654 | } | 
| 668 | 655 |  | 
| 669 | 656 | /// @} | 
| 670 | 657 |  | 
| 671 | 658 | /// \name Query Functions | 
| 672 | 659 | /// The results of the algorithm can be obtained using these | 
| 673 | 660 | /// functions.\n | 
| 674 | 661 | /// The \ref run() function must be called before using them. | 
| 675 | 662 |  | 
| 676 | 663 |     /// @{
 | 
| 677 | 664 |  | 
| 678 | 665 | /// \brief Return the total cost of the found flow. | 
| 679 | 666 | /// | 
| 680 | 667 | /// This function returns the total cost of the found flow. | 
| 681 | 668 | /// Its complexity is O(e), where e denotes the number of arcs. | 
| 682 | 669 | /// | 
| 683 | 670 | /// \note The return type of the function can be specified as a | 
| 684 | 671 | /// template parameter. For example, | 
| 685 | 672 | /// \code | 
| 686 | 673 | /// cs.totalCost<double>(); | 
| 687 | 674 | /// \endcode | 
| 688 | 675 | /// It is useful if the total cost cannot be stored in the \c Cost | 
| 689 | 676 | /// type of the algorithm, which is the default return type of the | 
| 690 | 677 | /// function. | 
| 691 | 678 | /// | 
| 692 | 679 | /// \pre \ref run() must be called before using this function. | 
| 693 | 680 | template <typename Number> | 
| 694 | 681 |     Number totalCost() const {
 | 
| 695 | 682 | Number c = 0; | 
| 696 | 683 |       for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 697 | 684 | int i = _arc_idb[a]; | 
| 698 | 685 | c += static_cast<Number>(_res_cap[i]) * | 
| 699 | 686 | (-static_cast<Number>(_scost[i])); | 
| 700 | 687 | } | 
| 701 | 688 | return c; | 
| 702 | 689 | } | 
| 703 | 690 |  | 
| 704 | 691 | #ifndef DOXYGEN | 
| 705 | 692 |     Cost totalCost() const {
 | 
| 706 | 693 | return totalCost<Cost>(); | 
| 707 | 694 | } | 
| 708 | 695 | #endif | 
| 709 | 696 |  | 
| 710 | 697 | /// \brief Return the flow on the given arc. | 
| 711 | 698 | /// | 
| 712 | 699 | /// This function returns the flow on the given arc. | 
| 713 | 700 | /// | 
| 714 | 701 | /// \pre \ref run() must be called before using this function. | 
| 715 | 702 |     Value flow(const Arc& a) const {
 | 
| 716 | 703 | return _res_cap[_arc_idb[a]]; | 
| 717 | 704 | } | 
| 718 | 705 |  | 
| 719 | 706 | /// \brief Return the flow map (the primal solution). | 
| 720 | 707 | /// | 
| 721 | 708 | /// This function copies the flow value on each arc into the given | 
| 722 | 709 | /// map. The \c Value type of the algorithm must be convertible to | 
| 723 | 710 | /// the \c Value type of the map. | 
| 724 | 711 | /// | 
| 725 | 712 | /// \pre \ref run() must be called before using this function. | 
| 726 | 713 | template <typename FlowMap> | 
| 727 | 714 |     void flowMap(FlowMap &map) const {
 | 
| 728 | 715 |       for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 729 | 716 | map.set(a, _res_cap[_arc_idb[a]]); | 
| 730 | 717 | } | 
| 731 | 718 | } | 
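    // Illustrative sketch (editorial; g is the assumed underlying digraph):
    // after a successful run(), the primal solution can be copied into an
    // ordinary arc map.
    //
    //   ListDigraph::ArcMap<int> f(g);
    //   cs.flowMap(f);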
| 732 | 719 |  | 
| 733 | 720 | /// \brief Return the potential (dual value) of the given node. | 
| 734 | 721 | /// | 
| 735 | 722 | /// This function returns the potential (dual value) of the | 
| 736 | 723 | /// given node. | 
| 737 | 724 | /// | 
| 738 | 725 | /// \pre \ref run() must be called before using this function. | 
| 739 | 726 |     Cost potential(const Node& n) const {
 | 
| 740 | 727 | return static_cast<Cost>(_pi[_node_id[n]]); | 
| 741 | 728 | } | 
| 742 | 729 |  | 
| 743 | 730 | /// \brief Return the potential map (the dual solution). | 
| 744 | 731 | /// | 
| 745 | 732 | /// This function copies the potential (dual value) of each node | 
| 746 | 733 | /// into the given map. | 
| 747 | 734 | /// The \c Cost type of the algorithm must be convertible to the | 
| 748 | 735 | /// \c Value type of the map. | 
| 749 | 736 | /// | 
| 750 | 737 | /// \pre \ref run() must be called before using this function. | 
| 751 | 738 | template <typename PotentialMap> | 
| 752 | 739 |     void potentialMap(PotentialMap &map) const {
 | 
| 753 | 740 |       for (NodeIt n(_graph); n != INVALID; ++n) {
 | 
| 754 | 741 | map.set(n, static_cast<Cost>(_pi[_node_id[n]])); | 
| 755 | 742 | } | 
| 756 | 743 | } | 
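    // Illustrative sketch (editorial): likewise, the dual solution can be
    // copied into a node map (g as above).
    //
    //   ListDigraph::NodeMap<int> pot(g);
    //   cs.potentialMap(pot);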
| 757 | 744 |  | 
| 758 | 745 | /// @} | 
| 759 | 746 |  | 
| 760 | 747 | private: | 
| 761 | 748 |  | 
| 762 | 749 | // Initialize the algorithm | 
| 763 | 750 |     ProblemType init() {
 | 
| 764 | 751 | if (_res_node_num <= 1) return INFEASIBLE; | 
| 765 | 752 |  | 
| 766 | 753 | // Check the sum of supply values | 
| 767 | 754 | _sum_supply = 0; | 
| 768 | 755 |       for (int i = 0; i != _root; ++i) {
 | 
| 769 | 756 | _sum_supply += _supply[i]; | 
| 770 | 757 | } | 
| 771 | 758 | if (_sum_supply > 0) return INFEASIBLE; | 
| 772 | 759 |  | 
| 773 | 760 |  | 
| 774 | 761 | // Initialize vectors | 
| 775 | 762 |       for (int i = 0; i != _res_node_num; ++i) {
 | 
| 776 | 763 | _pi[i] = 0; | 
| 777 | 764 | _excess[i] = _supply[i]; | 
| 778 | 765 | } | 
| 779 | 766 |  | 
| 780 | 767 | // Remove infinite upper bounds and check negative arcs | 
| 781 | 768 | const Value MAX = std::numeric_limits<Value>::max(); | 
| 782 | 769 | int last_out; | 
| 783 | 770 |       if (_have_lower) {
 | 
| 784 | 771 |         for (int i = 0; i != _root; ++i) {
 | 
| 785 | 772 | last_out = _first_out[i+1]; | 
| 786 | 773 |           for (int j = _first_out[i]; j != last_out; ++j) {
 | 
| 787 | 774 |             if (_forward[j]) {
 | 
| 788 | 775 | Value c = _scost[j] < 0 ? _upper[j] : _lower[j]; | 
| 789 | 776 | if (c >= MAX) return UNBOUNDED; | 
| 790 | 777 | _excess[i] -= c; | 
| 791 | 778 | _excess[_target[j]] += c; | 
| 792 | 779 | } | 
| 793 | 780 | } | 
| 794 | 781 | } | 
| 795 | 782 |       } else {
 | 
| 796 | 783 |         for (int i = 0; i != _root; ++i) {
 | 
| 797 | 784 | last_out = _first_out[i+1]; | 
| 798 | 785 |           for (int j = _first_out[i]; j != last_out; ++j) {
 | 
| 799 | 786 |             if (_forward[j] && _scost[j] < 0) {
 | 
| 800 | 787 | Value c = _upper[j]; | 
| 801 | 788 | if (c >= MAX) return UNBOUNDED; | 
| 802 | 789 | _excess[i] -= c; | 
| 803 | 790 | _excess[_target[j]] += c; | 
| 804 | 791 | } | 
| 805 | 792 | } | 
| 806 | 793 | } | 
| 807 | 794 | } | 
| 808 | 795 | Value ex, max_cap = 0; | 
| 809 | 796 |       for (int i = 0; i != _res_node_num; ++i) {
 | 
| 810 | 797 | ex = _excess[i]; | 
| 811 | 798 | _excess[i] = 0; | 
| 812 | 799 | if (ex < 0) max_cap -= ex; | 
| 813 | 800 | } | 
| 814 | 801 |       for (int j = 0; j != _res_arc_num; ++j) {
 | 
| 815 | 802 | if (_upper[j] >= MAX) _upper[j] = max_cap; | 
| 816 | 803 | } | 
| 817 | 804 |  | 
| 818 | 805 | // Initialize the large cost vector and the epsilon parameter | 
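      // (Note: the scaled arc costs below are the original costs multiplied
      //  by _res_node_num * _alpha; by the standard cost-scaling argument,
      //  for integer costs an epsilon-optimal flow with epsilon < 1 with
      //  respect to these scaled costs admits no negative residual cycle
      //  for the original costs, hence it is optimal.)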
| 819 | 806 | _epsilon = 0; | 
| 820 | 807 | LargeCost lc; | 
| 821 | 808 |       for (int i = 0; i != _root; ++i) {
 | 
| 822 | 809 | last_out = _first_out[i+1]; | 
| 823 | 810 |         for (int j = _first_out[i]; j != last_out; ++j) {
 | 
| 824 | 811 | lc = static_cast<LargeCost>(_scost[j]) * _res_node_num * _alpha; | 
| 825 | 812 | _cost[j] = lc; | 
| 826 | 813 | if (lc > _epsilon) _epsilon = lc; | 
| 827 | 814 | } | 
| 828 | 815 | } | 
| 829 | 816 | _epsilon /= _alpha; | 
| 830 | 817 |  | 
| 831 | 818 | // Initialize maps for Circulation and remove non-zero lower bounds | 
| 832 | 819 | ConstMap<Arc, Value> low(0); | 
| 833 | 820 | typedef typename Digraph::template ArcMap<Value> ValueArcMap; | 
| 834 | 821 | typedef typename Digraph::template NodeMap<Value> ValueNodeMap; | 
| 835 | 822 | ValueArcMap cap(_graph), flow(_graph); | 
| 836 | 823 | ValueNodeMap sup(_graph); | 
| 837 | 824 |       for (NodeIt n(_graph); n != INVALID; ++n) {
 | 
| 838 | 825 | sup[n] = _supply[_node_id[n]]; | 
| 839 | 826 | } | 
| 840 | 827 |       if (_have_lower) {
 | 
| 841 | 828 |         for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 842 | 829 | int j = _arc_idf[a]; | 
| 843 | 830 | Value c = _lower[j]; | 
| 844 | 831 | cap[a] = _upper[j] - c; | 
| 845 | 832 | sup[_graph.source(a)] -= c; | 
| 846 | 833 | sup[_graph.target(a)] += c; | 
| 847 | 834 | } | 
| 848 | 835 |       } else {
 | 
| 849 | 836 |         for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 850 | 837 | cap[a] = _upper[_arc_idf[a]]; | 
| 851 | 838 | } | 
| 852 | 839 | } | 
| 853 | 840 |  | 
| 854 | 841 | _sup_node_num = 0; | 
| 855 | 842 |       for (NodeIt n(_graph); n != INVALID; ++n) {
 | 
| 856 | 843 | if (sup[n] > 0) ++_sup_node_num; | 
| 857 | 844 | } | 
| 858 | 845 |  | 
| 859 | 846 | // Find a feasible flow using Circulation | 
| 860 | 847 | Circulation<Digraph, ConstMap<Arc, Value>, ValueArcMap, ValueNodeMap> | 
| 861 | 848 | circ(_graph, low, cap, sup); | 
| 862 | 849 | if (!circ.flowMap(flow).run()) return INFEASIBLE; | 
| 863 | 850 |  | 
| 864 | 851 | // Set residual capacities and handle GEQ supply type | 
| 865 | 852 |       if (_sum_supply < 0) {
 | 
| 866 | 853 |         for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 867 | 854 | Value fa = flow[a]; | 
| 868 | 855 | _res_cap[_arc_idf[a]] = cap[a] - fa; | 
| 869 | 856 | _res_cap[_arc_idb[a]] = fa; | 
| 870 | 857 | sup[_graph.source(a)] -= fa; | 
| 871 | 858 | sup[_graph.target(a)] += fa; | 
| 872 | 859 | } | 
| 873 | 860 |         for (NodeIt n(_graph); n != INVALID; ++n) {
 | 
| 874 | 861 | _excess[_node_id[n]] = sup[n]; | 
| 875 | 862 | } | 
| 876 | 863 |         for (int a = _first_out[_root]; a != _res_arc_num; ++a) {
 | 
| 877 | 864 | int u = _target[a]; | 
| 878 | 865 | int ra = _reverse[a]; | 
| 879 | 866 | _res_cap[a] = -_sum_supply + 1; | 
| 880 | 867 | _res_cap[ra] = -_excess[u]; | 
| 881 | 868 | _cost[a] = 0; | 
| 882 | 869 | _cost[ra] = 0; | 
| 883 | 870 | _excess[u] = 0; | 
| 884 | 871 | } | 
| 885 | 872 |       } else {
 | 
| 886 | 873 |         for (ArcIt a(_graph); a != INVALID; ++a) {
 | 
| 887 | 874 | Value fa = flow[a]; | 
| 888 | 875 | _res_cap[_arc_idf[a]] = cap[a] - fa; | 
| 889 | 876 | _res_cap[_arc_idb[a]] = fa; | 
| 890 | 877 | } | 
| 891 | 878 |         for (int a = _first_out[_root]; a != _res_arc_num; ++a) {
 | 
| 892 | 879 | int ra = _reverse[a]; | 
| 893 | 880 | _res_cap[a] = 0; | 
| 894 | 881 | _res_cap[ra] = 0; | 
| 895 | 882 | _cost[a] = 0; | 
| 896 | 883 | _cost[ra] = 0; | 
| 897 | 884 | } | 
| 898 | 885 | } | 
| 899 | 886 |  | 
| 900 | 887 | // Initialize data structures for buckets | 
| 901 | 888 | _max_rank = _alpha * _res_node_num; | 
| 902 | 889 | _buckets.resize(_max_rank); | 
| 903 | 890 | _bucket_next.resize(_res_node_num + 1); | 
| 904 | 891 | _bucket_prev.resize(_res_node_num + 1); | 
| 905 | 892 | _rank.resize(_res_node_num + 1); | 
| 906 | 893 |  | 
| 907 | 894 | return OPTIMAL; | 
| 908 | 895 | } | 
| 909 | 896 |  | 
| 910 | 897 | // Execute the algorithm and transform the results | 
| 911 | 898 |     void start(Method method) {
 | 
| 912 | 899 | const int MAX_PARTIAL_PATH_LENGTH = 4; | 
| 913 | 900 |  | 
| 914 | 901 |       switch (method) {
 | 
| 915 | 902 | case PUSH: | 
| 916 | 903 | startPush(); | 
| 917 | 904 | break; | 
| 918 | 905 | case AUGMENT: | 
| 919 | 906 | startAugment(_res_node_num - 1); | 
| 920 | 907 | break; | 
| 921 | 908 | case PARTIAL_AUGMENT: | 
| 922 | 909 | startAugment(MAX_PARTIAL_PATH_LENGTH); | 
| 923 | 910 | break; | 
| 924 | 911 | } | 
| 925 | 912 |  | 
| 926 | // Compute node potentials for the original costs | |
| 927 | _arc_vec.clear(); | |
| 928 | _cost_vec.clear(); | |
| 929 |       for (int j = 0; j != _res_arc_num; ++j) {
 | |
| 930 |         if (_res_cap[j] > 0) {
 | |
| 931 | _arc_vec.push_back(IntPair(_source[j], _target[j])); | |
| 932 | _cost_vec.push_back(_scost[j]); | |
| 913 | // Compute node potentials (dual solution) | |
| 914 |       for (int i = 0; i != _res_node_num; ++i) {
 | |
| 915 | _pi[i] = static_cast<Cost>(_pi[i] / (_res_node_num * _alpha)); | |
| 916 | } | |
| 917 | bool optimal = true; | |
| 918 |       for (int i = 0; optimal && i != _res_node_num; ++i) {
 | |
| 919 | LargeCost pi_i = _pi[i]; | |
| 920 | int last_out = _first_out[i+1]; | |
| 921 |         for (int j = _first_out[i]; j != last_out; ++j) {
 | |
| 922 |           if (_res_cap[j] > 0 && _scost[j] + pi_i - _pi[_target[j]] < 0) {
 | |
| 923 | optimal = false; | |
| 924 | break; | |
| 925 | } | |
| 933 | 926 | } | 
| 934 | 927 | } | 
| 935 | _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end()); | |
| 936 | 928 |  | 
| 937 | typename BellmanFord<StaticDigraph, LargeCostArcMap> | |
| 938 | ::template SetDistMap<LargeCostNodeMap>::Create bf(_sgr, _cost_map); | |
| 939 | bf.distMap(_pi_map); | |
| 940 | bf.init(0); | |
| 941 | bf.start(); | |
| 929 |       if (!optimal) {
 | |
| 930 | // Compute node potentials for the original costs with BellmanFord | |
| 931 | // (if it is necessary) | |
| 932 | typedef std::pair<int, int> IntPair; | |
| 933 | StaticDigraph sgr; | |
| 934 | std::vector<IntPair> arc_vec; | |
| 935 | std::vector<LargeCost> cost_vec; | |
| 936 | LargeCostArcMap cost_map(cost_vec); | |
| 937 |  | |
| 938 | arc_vec.clear(); | |
| 939 | cost_vec.clear(); | |
| 940 |         for (int j = 0; j != _res_arc_num; ++j) {
 | |
| 941 |           if (_res_cap[j] > 0) {
 | |
| 942 | int u = _source[j], v = _target[j]; | |
| 943 | arc_vec.push_back(IntPair(u, v)); | |
| 944 | cost_vec.push_back(_scost[j] + _pi[u] - _pi[v]); | |
| 945 | } | |
| 946 | } | |
| 947 | sgr.build(_res_node_num, arc_vec.begin(), arc_vec.end()); | |
| 948 |  | |
| 949 | typename BellmanFord<StaticDigraph, LargeCostArcMap>::Create | |
| 950 | bf(sgr, cost_map); | |
| 951 | bf.init(0); | |
| 952 | bf.start(); | |
| 953 |  | |
| 954 |         for (int i = 0; i != _res_node_num; ++i) {
 | |
| 955 | _pi[i] += bf.dist(sgr.node(i)); | |
| 956 | } | |
| 957 | } | |
| 958 |  | |
| 959 | // Shift potentials to meet the requirements of the GEQ type | |
| 960 | // optimality conditions | |
| 961 | LargeCost max_pot = _pi[_root]; | |
| 962 |       for (int i = 0; i != _res_node_num; ++i) {
 | |
| 963 | if (_pi[i] > max_pot) max_pot = _pi[i]; | |
| 964 | } | |
| 965 |       if (max_pot != 0) {
 | |
| 966 |         for (int i = 0; i != _res_node_num; ++i) {
 | |
| 967 | _pi[i] -= max_pot; | |
| 968 | } | |
| 969 | } | |
| 942 | 970 |  | 
| 943 | 971 | // Handle non-zero lower bounds | 
| 944 | 972 |       if (_have_lower) {
 | 
| 945 | 973 | int limit = _first_out[_root]; | 
| 946 | 974 |         for (int j = 0; j != limit; ++j) {
 | 
| 947 | 975 | if (!_forward[j]) _res_cap[j] += _lower[j]; | 
| 948 | 976 | } | 
| 949 | 977 | } | 
| 950 | 978 | } | 
| 951 | 979 |  | 
| 952 | 980 | // Initialize a cost scaling phase | 
| 953 | 981 |     void initPhase() {
 | 
| 954 | 982 | // Saturate arcs not satisfying the optimality condition | 
| 955 | 983 |       for (int u = 0; u != _res_node_num; ++u) {
 | 
| 956 | 984 | int last_out = _first_out[u+1]; | 
| 957 | 985 | LargeCost pi_u = _pi[u]; | 
| 958 | 986 |         for (int a = _first_out[u]; a != last_out; ++a) {
 | 
| 959 | 987 | Value delta = _res_cap[a]; | 
| 960 | 988 |           if (delta > 0) {
 | 
| 961 | 989 | int v = _target[a]; | 
| 962 | 990 |             if (_cost[a] + pi_u - _pi[v] < 0) {
 | 
| 963 | 991 | _excess[u] -= delta; | 
| 964 | 992 | _excess[v] += delta; | 
| 965 | 993 | _res_cap[a] = 0; | 
| 966 | 994 | _res_cap[_reverse[a]] += delta; | 
| 967 | 995 | } | 
| 968 | 996 | } | 
| 969 | 997 | } | 
| 970 | 998 | } | 
| 971 | 999 |  | 
| 972 | 1000 | // Find active nodes (i.e. nodes with positive excess) | 
| 973 | 1001 |       for (int u = 0; u != _res_node_num; ++u) {
 | 
| 974 | 1002 | if (_excess[u] > 0) _active_nodes.push_back(u); | 
| 975 | 1003 | } | 
| 976 | 1004 |  | 
| 977 | 1005 | // Initialize the next arcs | 
| 978 | 1006 |       for (int u = 0; u != _res_node_num; ++u) {
 | 
| 979 | 1007 | _next_out[u] = _first_out[u]; | 
| 980 | 1008 | } | 
| 981 | 1009 | } | 
| 982 | 1010 |  | 
| 983 | 1011 | // Price (potential) refinement heuristic | 
| 984 | 1012 |     bool priceRefinement() {
 | 
| 985 | 1013 |  | 
| 986 | 1014 | // Stack for storing the topological order | 
| 987 | 1015 | IntVector stack(_res_node_num); | 
| 988 | 1016 | int stack_top; | 
| 989 | 1017 |  | 
| 990 | 1018 | // Perform phases | 
| 991 | 1019 |       while (topologicalSort(stack, stack_top)) {
 | 
| 992 | 1020 |  | 
| 993 | 1021 | // Compute node ranks in the acyclic admissible network and | 
| 994 | 1022 | // store the nodes in buckets | 
| 995 | 1023 |         for (int i = 0; i != _res_node_num; ++i) {
 | 
| 996 | 1024 | _rank[i] = 0; | 
| 997 | 1025 | } | 
| 998 | 1026 | const int bucket_end = _root + 1; | 
| 999 | 1027 |         for (int r = 0; r != _max_rank; ++r) {
 | 
| 1000 | 1028 | _buckets[r] = bucket_end; | 
| 1001 | 1029 | } | 
| 1002 | 1030 | int top_rank = 0; | 
| 1003 | 1031 |         for ( ; stack_top >= 0; --stack_top) {
 | 
| 1004 | 1032 | int u = stack[stack_top], v; | 
| 1005 | 1033 | int rank_u = _rank[u]; | 
| 1006 | 1034 |  | 
| 1007 | 1035 | LargeCost rc, pi_u = _pi[u]; | 
| 1008 | 1036 | int last_out = _first_out[u+1]; | 
| 1009 | 1037 |           for (int a = _first_out[u]; a != last_out; ++a) {
 | 
| 1010 | 1038 |             if (_res_cap[a] > 0) {
 | 
| 1011 | 1039 | v = _target[a]; | 
| 1012 | 1040 | rc = _cost[a] + pi_u - _pi[v]; | 
| 1013 | 1041 |               if (rc < 0) {
 | 
| 1014 | 1042 | LargeCost nrc = static_cast<LargeCost>((-rc - 0.5) / _epsilon); | 
| 1015 | 1043 |                 if (nrc < LargeCost(_max_rank)) {
 | 
| 1016 | 1044 | int new_rank_v = rank_u + static_cast<int>(nrc); | 
| 1017 | 1045 |                   if (new_rank_v > _rank[v]) {
 | 
| 1018 | 1046 | _rank[v] = new_rank_v; | 
| 1019 | 1047 | } | 
| 1020 | 1048 | } | 
| 1021 | 1049 | } | 
| 1022 | 1050 | } | 
| 1023 | 1051 | } | 
| 1024 | 1052 |  | 
| 1025 | 1053 |           if (rank_u > 0) {
 | 
| 1026 | 1054 | top_rank = std::max(top_rank, rank_u); | 
| 1027 | 1055 | int bfirst = _buckets[rank_u]; | 
| 1028 | 1056 | _bucket_next[u] = bfirst; | 
| 1029 | 1057 | _bucket_prev[bfirst] = u; | 
| 1030 | 1058 | _buckets[rank_u] = u; | 
| 1031 | 1059 | } | 
| 1032 | 1060 | } | 
| 1033 | 1061 |  | 
| 1034 | 1062 | // Check if the current flow is epsilon-optimal | 
| 1035 | 1063 |         if (top_rank == 0) {
 | 
| 1036 | 1064 | return true; | 
| 1037 | 1065 | } | 
| 1038 | 1066 |  | 
| 1039 | 1067 | // Process buckets in top-down order | 
| 1040 | 1068 |         for (int rank = top_rank; rank > 0; --rank) {
 | 
| 1041 | 1069 |           while (_buckets[rank] != bucket_end) {
 | 
| 1042 | 1070 | // Remove the first node from the current bucket | 
| 1043 | 1071 | int u = _buckets[rank]; | 
| 1044 | 1072 | _buckets[rank] = _bucket_next[u]; | 
| 1045 | 1073 |  | 
| 1046 | 1074 | // Search the outgoing arcs of u | 
| 1047 | 1075 | LargeCost rc, pi_u = _pi[u]; | 
| 1048 | 1076 | int last_out = _first_out[u+1]; | 
| 1049 | 1077 | int v, old_rank_v, new_rank_v; | 
| 1050 | 1078 |             for (int a = _first_out[u]; a != last_out; ++a) {
 | 
| 1051 | 1079 |               if (_res_cap[a] > 0) {
 | 
| 1052 | 1080 | v = _target[a]; | 
| 1053 | 1081 | old_rank_v = _rank[v]; | 
| 1054 | 1082 |  | 
| 1055 | 1083 |                 if (old_rank_v < rank) {
 | 
| 1056 | 1084 |  | 
| 1057 | 1085 | // Compute the new rank of node v | 
| 1058 | 1086 | rc = _cost[a] + pi_u - _pi[v]; | 
| 1059 | 1087 |                   if (rc < 0) {
 | 
| 1060 | 1088 | new_rank_v = rank; | 
| 1061 | 1089 |                   } else {
 | 
| 1062 | 1090 | LargeCost nrc = rc / _epsilon; | 
| 1063 | 1091 | new_rank_v = 0; | 
| 1064 | 1092 |                     if (nrc < LargeCost(_max_rank)) {
 | 
| 1065 | 1093 | new_rank_v = rank - 1 - static_cast<int>(nrc); | 
| 1066 | 1094 | } | 
| 1067 | 1095 | } | 
| 1068 | 1096 |  | 
| 1069 | 1097 | // Change the rank of node v | 
| 1070 | 1098 |                   if (new_rank_v > old_rank_v) {
 | 
| 1071 | 1099 | _rank[v] = new_rank_v; | 
| 1072 | 1100 |  | 
| 1073 | 1101 | // Remove v from its old bucket | 
| 1074 | 1102 |                     if (old_rank_v > 0) {
 | 
| 1075 | 1103 |                       if (_buckets[old_rank_v] == v) {
 | 
| 1076 | 1104 | _buckets[old_rank_v] = _bucket_next[v]; | 
| 1077 | 1105 |                       } else {
 | 
| 1078 | 1106 | int pv = _bucket_prev[v], nv = _bucket_next[v]; | 
| 1079 | 1107 | _bucket_next[pv] = nv; | 
| 1080 | 1108 | _bucket_prev[nv] = pv; | 
| 1081 | 1109 | } | 
| 1082 | 1110 | } | 
| 1083 | 1111 |  | 
| 1084 | 1112 | // Insert v into its new bucket | 
| 1085 | 1113 | int nv = _buckets[new_rank_v]; | 
| 1086 | 1114 | _bucket_next[v] = nv; | 
| 1087 | 1115 | _bucket_prev[nv] = v; | 
| 1088 | 1116 | _buckets[new_rank_v] = v; | 
| 1089 | 1117 | } | 
| 1090 | 1118 | } | 
| 1091 | 1119 | } | 
| 1092 | 1120 | } | 
| 1093 | 1121 |  | 
| 1094 | 1122 | // Refine potential of node u | 
| 1095 | 1123 | _pi[u] -= rank * _epsilon; | 
| 1096 | 1124 | } | 
| 1097 | 1125 | } | 
| 1098 | 1126 |  | 
| 1099 | 1127 | } | 
| 1100 | 1128 |  | 
| 1101 | 1129 | return false; | 
| 1102 | 1130 | } | 
| 1103 | 1131 |  | 
| 1104 | 1132 | // Find and cancel cycles in the admissible network and | 
| 1105 | 1133 | // determine topological order using DFS | 
| 1106 | 1134 |     bool topologicalSort(IntVector &stack, int &stack_top) {
 | 
| 1107 | 1135 | const int MAX_CYCLE_CANCEL = 1; | 
| 1108 | 1136 |  | 
| 1109 | 1137 | BoolVector reached(_res_node_num, false); | 
| 1110 | 1138 | BoolVector processed(_res_node_num, false); | 
| 1111 | 1139 | IntVector pred(_res_node_num); | 
| 1112 | 1140 |       for (int i = 0; i != _res_node_num; ++i) {
 | 
| 1113 | 1141 | _next_out[i] = _first_out[i]; | 
| 1114 | 1142 | } | 
| 1115 | 1143 | stack_top = -1; | 
| 1116 | 1144 |  | 
| 1117 | 1145 | int cycle_cnt = 0; | 
| 1118 | 1146 |       for (int start = 0; start != _res_node_num; ++start) {
 | 
| 1119 | 1147 | if (reached[start]) continue; | 
| 1120 | 1148 |  | 
| 1121 | 1149 | // Start DFS search from this start node | 
| 1122 | 1150 | pred[start] = -1; | 
| 1123 | 1151 | int tip = start, v; | 
| 1124 | 1152 |         while (true) {
 | 
| 1125 | 1153 | // Check the outgoing arcs of the current tip node | 
| 1126 | 1154 | reached[tip] = true; | 
| 1127 | 1155 | LargeCost pi_tip = _pi[tip]; | 
| 1128 | 1156 | int a, last_out = _first_out[tip+1]; | 
| 1129 | 1157 |           for (a = _next_out[tip]; a != last_out; ++a) {
 | 
| 1130 | 1158 |             if (_res_cap[a] > 0) {
 | 
| 1131 | 1159 | v = _target[a]; | 
| 1132 | 1160 |               if (_cost[a] + pi_tip - _pi[v] < 0) {
 | 
| 1133 | 1161 |                 if (!reached[v]) {
 | 
| 1134 | 1162 | // A new node is reached | 
| 1135 | 1163 | reached[v] = true; | 
| 1136 | 1164 | pred[v] = tip; | 
| 1137 | 1165 | _next_out[tip] = a; | 
| 1138 | 1166 | tip = v; | 
| 1139 | 1167 | a = _next_out[tip]; | 
| 1140 | 1168 | last_out = _first_out[tip+1]; | 
| 1141 | 1169 | break; | 
| 1142 | 1170 | } | 
| 1143 | 1171 |                 else if (!processed[v]) {
 | 
| 1144 | 1172 | // A cycle is found | 
| 1145 | 1173 | ++cycle_cnt; | 
| 1146 | 1174 | _next_out[tip] = a; | 
| 1147 | 1175 |  | 
| 1148 | 1176 | // Find the minimum residual capacity along the cycle | 
| 1149 | 1177 | Value d, delta = _res_cap[a]; | 
| 1150 | 1178 | int u, delta_node = tip; | 
| 1151 | 1179 |                   for (u = tip; u != v; ) {
 | 
| 1152 | 1180 | u = pred[u]; | 
| 1153 | 1181 | d = _res_cap[_next_out[u]]; | 
| 1154 | 1182 |                     if (d <= delta) {
 | 
| 1155 | 1183 | delta = d; | 
| 1156 | 1184 | delta_node = u; | 
| 1157 | 1185 | } | 
| 1158 | 1186 | } | 
| 1159 | 1187 |  | 
| 1160 | 1188 | // Augment along the cycle | 
| 1161 | 1189 | _res_cap[a] -= delta; | 
| 1162 | 1190 | _res_cap[_reverse[a]] += delta; | 
| 1163 | 1191 |                   for (u = tip; u != v; ) {
 | 
| 1164 | 1192 | u = pred[u]; | 
| 1165 | 1193 | int ca = _next_out[u]; | 
| 1166 | 1194 | _res_cap[ca] -= delta; | 
| 1167 | 1195 | _res_cap[_reverse[ca]] += delta; | 
| 1168 | 1196 | } | 
| 1169 | 1197 |  | 
| 1170 | 1198 | // Check the maximum number of cycle-canceling steps | 
| 1171 | 1199 |                   if (cycle_cnt >= MAX_CYCLE_CANCEL) {
 | 
| 1172 | 1200 | return false; | 
| 1173 | 1201 | } | 
| 1174 | 1202 |  | 
| 1175 | 1203 | // Roll back search to delta_node | 
| 1176 | 1204 |                   if (delta_node != tip) {
 | 
| 1177 | 1205 |                     for (u = tip; u != delta_node; u = pred[u]) {
 | 
| 1178 | 1206 | reached[u] = false; | 
| 1179 | 1207 | } | 
| 1180 | 1208 | tip = delta_node; | 
| 1181 | 1209 | a = _next_out[tip] + 1; | 
| 1182 | 1210 | last_out = _first_out[tip+1]; | 
| 1183 | 1211 | break; | 
| 1184 | 1212 | } | 
| 1185 | 1213 | } | 
| 1186 | 1214 | } | 
| 1187 | 1215 | } | 
| 1188 | 1216 | } | 
| 1189 | 1217 |  | 
| 1190 | 1218 | // Step back to the previous node | 
| 1191 | 1219 |           if (a == last_out) {
 | 
| 1192 | 1220 | processed[tip] = true; | 
| 1193 | 1221 | stack[++stack_top] = tip; | 
| 1194 | 1222 | tip = pred[tip]; | 
| 1195 | 1223 |             if (tip < 0) {
 | 
| 1196 | 1224 | // Finish DFS from the current start node | 
| 1197 | 1225 | break; | 
| 1198 | 1226 | } | 
| 1199 | 1227 | ++_next_out[tip]; | 
| 1200 | 1228 | } | 
| 1201 | 1229 | } | 
| 1202 | 1230 |  | 
| 1203 | 1231 | } | 
| 1204 | 1232 |  | 
| 1205 | 1233 | return (cycle_cnt == 0); | 
| 1206 | 1234 | } | 

    // Global potential update heuristic
    void globalUpdate() {
      const int bucket_end = _root + 1;

      // Initialize buckets
      for (int r = 0; r != _max_rank; ++r) {
        _buckets[r] = bucket_end;
      }
      Value total_excess = 0;
      int b0 = bucket_end;
      for (int i = 0; i != _res_node_num; ++i) {
        if (_excess[i] < 0) {
          _rank[i] = 0;
          _bucket_next[i] = b0;
          _bucket_prev[b0] = i;
          b0 = i;
        } else {
          total_excess += _excess[i];
          _rank[i] = _max_rank;
        }
      }
      if (total_excess == 0) return;
      _buckets[0] = b0;

      // Search the buckets
      int r = 0;
      for ( ; r != _max_rank; ++r) {
        while (_buckets[r] != bucket_end) {
          // Remove the first node from the current bucket
          int u = _buckets[r];
          _buckets[r] = _bucket_next[u];

          // Search the incoming arcs of u
          LargeCost pi_u = _pi[u];
          int last_out = _first_out[u+1];
          for (int a = _first_out[u]; a != last_out; ++a) {
            int ra = _reverse[a];
            if (_res_cap[ra] > 0) {
              int v = _source[ra];
              int old_rank_v = _rank[v];
              if (r < old_rank_v) {
                // Compute the new rank of v
                LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
                int new_rank_v = old_rank_v;
                if (nrc < LargeCost(_max_rank)) {
                  new_rank_v = r + 1 + static_cast<int>(nrc);
                }

                // Change the rank of v
                if (new_rank_v < old_rank_v) {
                  _rank[v] = new_rank_v;
                  _next_out[v] = _first_out[v];

                  // Remove v from its old bucket
                  if (old_rank_v < _max_rank) {
                    if (_buckets[old_rank_v] == v) {
                      _buckets[old_rank_v] = _bucket_next[v];
                    } else {
                      int pv = _bucket_prev[v], nv = _bucket_next[v];
                      _bucket_next[pv] = nv;
                      _bucket_prev[nv] = pv;
                    }
                  }

                  // Insert v into its new bucket
                  int nv = _buckets[new_rank_v];
                  _bucket_next[v] = nv;
                  _bucket_prev[nv] = v;
                  _buckets[new_rank_v] = v;
                }
              }
            }
          }

          // Finish the search if there are no more active nodes
          if (_excess[u] > 0) {
            total_excess -= _excess[u];
            if (total_excess <= 0) break;
          }
        }
        if (total_excess <= 0) break;
      }

      // Relabel nodes
      for (int u = 0; u != _res_node_num; ++u) {
        int k = std::min(_rank[u], r);
        if (k > 0) {
          _pi[u] -= _epsilon * k;
          _next_out[u] = _first_out[u];
        }
      }
    }
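
    // Sketch of the idea behind globalUpdate(): the bucket search above is,
    // in effect, a Dial-like backward search from the deficit nodes
    // (negative excess) over residual arcs, where traversing an arc costs
    // 1 + floor(reduced cost / _epsilon) buckets. A node that ends up with
    // rank k (capped by the last searched rank r) gets its potential
    // decreased by k * _epsilon in the final loop; e.g. with _epsilon == 4
    // and rank 3 the potential drops by 12 in a single step, replacing what
    // would otherwise be several individual relabel operations.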

    /// Execute the algorithm performing augment and relabel operations
    void startAugment(int max_length) {
      // Parameters for heuristics
      const int PRICE_REFINEMENT_LIMIT = 2;
      const double GLOBAL_UPDATE_FACTOR = 1.0;
      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
        (_res_node_num + _sup_node_num * _sup_node_num));
      int next_global_update_limit = global_update_skip;

      // Perform cost scaling phases
      IntVector path;
      BoolVector path_arc(_res_arc_num, false);
      int relabel_cnt = 0;
      int eps_phase_cnt = 0;
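      // Scaling schedule: after each phase _epsilon is divided by _alpha;
      // once the current value is below _alpha (but still above 1), the
      // next phase is run with _epsilon == 1 directly. By the usual
      // cost-scaling argument, a flow that is 1-optimal with respect to the
      // scaled-up internal costs is optimal for the original problem.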
      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                                        1 : _epsilon / _alpha )
      {
        ++eps_phase_cnt;

        // Price refinement heuristic
        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
          if (priceRefinement()) continue;
        }

        // Initialize current phase
        initPhase();

        // Perform partial augment and relabel operations
        while (true) {
          // Select an active node (FIFO selection)
          while (_active_nodes.size() > 0 &&
                 _excess[_active_nodes.front()] <= 0) {
            _active_nodes.pop_front();
          }
          if (_active_nodes.size() == 0) break;
          int start = _active_nodes.front();

          // Find an augmenting path from the start node
          int tip = start;
          while (int(path.size()) < max_length && _excess[tip] >= 0) {
            int u;
            LargeCost rc, min_red_cost = std::numeric_limits<LargeCost>::max();
            LargeCost pi_tip = _pi[tip];
            int last_out = _first_out[tip+1];
            for (int a = _next_out[tip]; a != last_out; ++a) {
              if (_res_cap[a] > 0) {
                u = _target[a];
                rc = _cost[a] + pi_tip - _pi[u];
                if (rc < 0) {
                  path.push_back(a);
                  _next_out[tip] = a;
                  if (path_arc[a]) {
                    goto augment;   // a cycle is found, stop path search
                  }
                  tip = u;
                  path_arc[a] = true;
                  goto next_step;
                }
                else if (rc < min_red_cost) {
                  min_red_cost = rc;
                }
              }
            }

            // Relabel tip node
            if (tip != start) {
              int ra = _reverse[path.back()];
              min_red_cost =
                std::min(min_red_cost, _cost[ra] + pi_tip - _pi[_target[ra]]);
            }
            last_out = _next_out[tip];
            for (int a = _first_out[tip]; a != last_out; ++a) {
              if (_res_cap[a] > 0) {
                rc = _cost[a] + pi_tip - _pi[_target[a]];
                if (rc < min_red_cost) {
                  min_red_cost = rc;
                }
              }
            }
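            // Relabel: lower the potential of the tip node by the smallest
            // residual reduced cost found above plus _epsilon, so that its
            // cheapest outgoing residual arc becomes admissible again while
            // the _epsilon-optimality invariant of the phase is preserved.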
            _pi[tip] -= min_red_cost + _epsilon;
            _next_out[tip] = _first_out[tip];
            ++relabel_cnt;

            // Step back
            if (tip != start) {
              int pa = path.back();
              path_arc[pa] = false;
              tip = _source[pa];
              path.pop_back();
            }

          next_step: ;
          }

          // Augment along the found path (as much flow as possible)
        augment:
          Value delta;
          int pa, u, v = start;
          for (int i = 0; i != int(path.size()); ++i) {
            pa = path[i];
            u = v;
            v = _target[pa];
            path_arc[pa] = false;
            delta = std::min(_res_cap[pa], _excess[u]);
            _res_cap[pa] -= delta;
            _res_cap[_reverse[pa]] += delta;
            _excess[u] -= delta;
            _excess[v] += delta;
            if (_excess[v] > 0 && _excess[v] <= delta) {
              _active_nodes.push_back(v);
            }
          }
          path.clear();

          // Global update heuristic
          if (relabel_cnt >= next_global_update_limit) {
            globalUpdate();
            next_global_update_limit += global_update_skip;
          }
        }

      }

    }
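
    // The max_length argument bounds the length of the paths built above:
    // a small bound yields the partial augment-relabel behaviour (short
    // paths, frequent relabels), while a bound on the order of the number
    // of nodes makes the search grow full augmenting paths before each
    // augmentation.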

    /// Execute the algorithm performing push and relabel operations
    void startPush() {
      // Parameters for heuristics
      const int PRICE_REFINEMENT_LIMIT = 2;
      const double GLOBAL_UPDATE_FACTOR = 2.0;

      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
        (_res_node_num + _sup_node_num * _sup_node_num));
      int next_global_update_limit = global_update_skip;

      // Perform cost scaling phases
      BoolVector hyper(_res_node_num, false);
      LargeCostVector hyper_cost(_res_node_num);
      int relabel_cnt = 0;
      int eps_phase_cnt = 0;
      for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                                        1 : _epsilon / _alpha )
      {
        ++eps_phase_cnt;

        // Price refinement heuristic
        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
          if (priceRefinement()) continue;
        }

        // Initialize current phase
        initPhase();

        // Perform push and relabel operations
        while (_active_nodes.size() > 0) {
          LargeCost min_red_cost, rc, pi_n;
          Value delta;
          int n, t, a, last_out = _res_arc_num;

        next_node:
          // Select an active node (FIFO selection)
          n = _active_nodes.front();
          last_out = _first_out[n+1];
          pi_n = _pi[n];

          // Perform push operations if there are admissible arcs
          if (_excess[n] > 0) {
            for (a = _next_out[n]; a != last_out; ++a) {
              if (_res_cap[a] > 0 &&
                  _cost[a] + pi_n - _pi[_target[a]] < 0) {
                delta = std::min(_res_cap[a], _excess[n]);
                t = _target[a];

                // Push-look-ahead heuristic
                Value ahead = -_excess[t];
                int last_out_t = _first_out[t+1];
                LargeCost pi_t = _pi[t];
                for (int ta = _next_out[t]; ta != last_out_t; ++ta) {
                  if (_res_cap[ta] > 0 &&
                      _cost[ta] + pi_t - _pi[_target[ta]] < 0)
                    ahead += _res_cap[ta];
                  if (ahead >= delta) break;
                }
                if (ahead < 0) ahead = 0;
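
                // ahead estimates how much flow the target node t could
                // forward on its own admissible arcs (minus its deficit).
                // If t could not forward the full delta, only ahead units
                // are pushed below and t is marked as a "hyper" node that
                // is processed next, which avoids pushing flow that t would
                // immediately have to push back.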

                // Push flow along the arc
                if (ahead < delta && !hyper[t]) {
                  _res_cap[a] -= ahead;
                  _res_cap[_reverse[a]] += ahead;
                  _excess[n] -= ahead;
                  _excess[t] += ahead;
                  _active_nodes.push_front(t);
                  hyper[t] = true;
                  hyper_cost[t] = _cost[a] + pi_n - pi_t;
                  _next_out[n] = a;
                  goto next_node;
                } else {
                  _res_cap[a] -= delta;
                  _res_cap[_reverse[a]] += delta;
                  _excess[n] -= delta;
                  _excess[t] += delta;
                  if (_excess[t] > 0 && _excess[t] <= delta)
                    _active_nodes.push_back(t);
                }

                if (_excess[n] == 0) {
                  _next_out[n] = a;
                  goto remove_nodes;
                }
              }
            }
            _next_out[n] = a;
          }

          // Relabel the node if it is still active (or hyper)
          if (_excess[n] > 0 || hyper[n]) {
            min_red_cost = hyper[n] ? -hyper_cost[n] :
              std::numeric_limits<LargeCost>::max();
            for (int a = _first_out[n]; a != last_out; ++a) {
              if (_res_cap[a] > 0) {
                rc = _cost[a] + pi_n - _pi[_target[a]];
                if (rc < min_red_cost) {
                  min_red_cost = rc;
                }
              }
            }
            _pi[n] -= min_red_cost + _epsilon;
            _next_out[n] = _first_out[n];
            hyper[n] = false;
            ++relabel_cnt;
          }

          // Remove nodes that are neither active nor hyper
        remove_nodes:
          while ( _active_nodes.size() > 0 &&
                  _excess[_active_nodes.front()] <= 0 &&
                  !hyper[_active_nodes.front()] ) {
            _active_nodes.pop_front();
          }

          // Global update heuristic
          if (relabel_cnt >= next_global_update_limit) {
            globalUpdate();
            for (int u = 0; u != _res_node_num; ++u)
              hyper[u] = false;
            next_global_update_limit += global_update_skip;
          }
        }
      }
    }

  }; //class CostScaling
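
  // A minimal usage sketch of this class. The digraph type (ListDigraph
  // from <lemon/list_graph.h>), the capacity, cost and supply maps, and the
  // option names (upperMap/costMap/supplyMap, run, OPTIMAL, totalCost) are
  // assumed to follow the interface declared earlier in this header:
  //
  //   ListDigraph g;
  //   ListDigraph::ArcMap<int> capacity(g), cost(g);
  //   ListDigraph::NodeMap<int> supply(g);
  //   // ... build the digraph and fill the maps ...
  //   CostScaling<ListDigraph> cs(g);
  //   cs.upperMap(capacity).costMap(cost).supplyMap(supply);
  //   if (cs.run() == CostScaling<ListDigraph>::OPTIMAL) {
  //     // query the total cost of the computed minimum cost flow
  //     int c = cs.totalCost();
  //   }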

  ///@}

} //namespace lemon

#endif //LEMON_COST_SCALING_H