lemon/cost_scaling.h
changeset 1177 3c00344f49c9
parent 1093 fb1c7da561ce
parent 1110 c0c2f5c87aa6
     1.1 --- a/lemon/cost_scaling.h	Mon Jul 16 16:21:40 2018 +0200
     1.2 +++ b/lemon/cost_scaling.h	Wed Oct 17 19:14:07 2018 +0200
     1.3 @@ -2,7 +2,7 @@
     1.4   *
     1.5   * This file is a part of LEMON, a generic C++ optimization library.
     1.6   *
     1.7 - * Copyright (C) 2003-2010
     1.8 + * Copyright (C) 2003-2013
     1.9   * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
    1.10   * (Egervary Research Group on Combinatorial Optimization, EGRES).
    1.11   *
    1.12 @@ -91,11 +91,18 @@
    1.13    ///
    1.14    /// \ref CostScaling implements a cost scaling algorithm that performs
    1.15    /// push/augment and relabel operations for finding a \ref min_cost_flow
    1.16 -  /// "minimum cost flow" \ref amo93networkflows, \ref goldberg90approximation,
    1.17 -  /// \ref goldberg97efficient, \ref bunnagel98efficient.
    1.18 +  /// "minimum cost flow" \cite amo93networkflows,
    1.19 +  /// \cite goldberg90approximation,
    1.20 +  /// \cite goldberg97efficient, \cite bunnagel98efficient.
    1.21    /// It is a highly efficient primal-dual solution method, which
    1.22    /// can be viewed as the generalization of the \ref Preflow
    1.23    /// "preflow push-relabel" algorithm for the maximum flow problem.
    1.24 +  /// It is a polynomial algorithm, its running time complexity is
    1.25 +  /// \f$O(n^2m\log(nK))\f$, where <i>K</i> denotes the maximum arc cost.
    1.26 +  ///
    1.27 +  /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
    1.28 +  /// implementations available in LEMON for solving this problem.
    1.29 +  /// (For more information, see \ref min_cost_flow_algs "the module page".)
    1.30    ///
    1.31    /// Most of the parameters of the problem (except for the digraph)
    1.32    /// can be given using separate functions, and the algorithm can be
    1.33 @@ -113,10 +120,11 @@
    1.34    /// In most cases, this parameter should not be set directly,
    1.35    /// consider to use the named template parameters instead.
    1.36    ///
    1.37 -  /// \warning Both number types must be signed and all input data must
    1.38 +  /// \warning Both \c V and \c C must be signed number types.
    1.39 +  /// \warning All input data (capacities, supply values, and costs) must
    1.40    /// be integer.
    1.41 -  /// \warning This algorithm does not support negative costs for such
    1.42 -  /// arcs that have infinite upper bound.
    1.43 +  /// \warning This algorithm does not support negative costs for
    1.44 +  /// arcs having infinite upper bound.
    1.45    ///
    1.46    /// \note %CostScaling provides three different internal methods,
    1.47    /// from which the most efficient one is used by default.
    1.48 @@ -145,7 +153,8 @@
    1.49      /// otherwise it is \c double.
    1.50      typedef typename TR::LargeCost LargeCost;
    1.51  
    1.52 -    /// The \ref CostScalingDefaultTraits "traits class" of the algorithm
    1.53 +    /// \brief The \ref lemon::CostScalingDefaultTraits "traits class"
    1.54 +    /// of the algorithm
    1.55      typedef TR Traits;
    1.56  
    1.57    public:
    1.58 @@ -178,7 +187,7 @@
    1.59      /// in their base operations, which are used in conjunction with the
    1.60      /// relabel operation.
    1.61      /// By default, the so called \ref PARTIAL_AUGMENT
    1.62 -    /// "Partial Augment-Relabel" method is used, which proved to be
    1.63 +    /// "Partial Augment-Relabel" method is used, which turned out to be
    1.64      /// the most efficient and the most robust on various test inputs.
    1.65      /// However, the other methods can be selected using the \ref run()
    1.66      /// function with the proper parameter.
    1.67 @@ -205,7 +214,8 @@
    1.68      typedef std::vector<Cost> CostVector;
    1.69      typedef std::vector<LargeCost> LargeCostVector;
    1.70      typedef std::vector<char> BoolVector;
    1.71 -    // Note: vector<char> is used instead of vector<bool> for efficiency reasons
    1.72 +    // Note: vector<char> is used instead of vector<bool>
    1.73 +    // for efficiency reasons
    1.74  
    1.75    private:
    1.76  
    1.77 @@ -233,7 +243,6 @@
    1.78        std::vector<Value>& _v;
    1.79      };
    1.80  
    1.81 -    typedef StaticVectorMap<StaticDigraph::Node, LargeCost> LargeCostNodeMap;
    1.82      typedef StaticVectorMap<StaticDigraph::Arc, LargeCost> LargeCostArcMap;
    1.83  
    1.84    private:
    1.85 @@ -247,7 +256,7 @@
    1.86      int _root;
    1.87  
    1.88      // Parameters of the problem
    1.89 -    bool _have_lower;
    1.90 +    bool _has_lower;
    1.91      Value _sum_supply;
    1.92      int _sup_node_num;
    1.93  
    1.94 @@ -284,14 +293,6 @@
    1.95      IntVector _rank;
    1.96      int _max_rank;
    1.97  
    1.98 -    // Data for a StaticDigraph structure
    1.99 -    typedef std::pair<int, int> IntPair;
   1.100 -    StaticDigraph _sgr;
   1.101 -    std::vector<IntPair> _arc_vec;
   1.102 -    std::vector<LargeCost> _cost_vec;
   1.103 -    LargeCostArcMap _cost_map;
   1.104 -    LargeCostNodeMap _pi_map;
   1.105 -
   1.106    public:
   1.107  
   1.108      /// \brief Constant for infinite upper bounds (capacities).
   1.109 @@ -338,7 +339,6 @@
   1.110      /// \param graph The digraph the algorithm runs on.
   1.111      CostScaling(const GR& graph) :
   1.112        _graph(graph), _node_id(graph), _arc_idf(graph), _arc_idb(graph),
   1.113 -      _cost_map(_cost_vec), _pi_map(_pi),
   1.114        INF(std::numeric_limits<Value>::has_infinity ?
   1.115            std::numeric_limits<Value>::infinity() :
   1.116            std::numeric_limits<Value>::max())
   1.117 @@ -372,10 +372,9 @@
   1.118      /// \return <tt>(*this)</tt>
   1.119      template <typename LowerMap>
   1.120      CostScaling& lowerMap(const LowerMap& map) {
   1.121 -      _have_lower = true;
   1.122 +      _has_lower = true;
   1.123        for (ArcIt a(_graph); a != INVALID; ++a) {
   1.124          _lower[_arc_idf[a]] = map[a];
   1.125 -        _lower[_arc_idb[a]] = map[a];
   1.126        }
   1.127        return *this;
   1.128      }
   1.129 @@ -447,7 +446,7 @@
   1.130      /// calling \ref run(), the supply of each node will be set to zero.
   1.131      ///
   1.132      /// Using this function has the same effect as using \ref supplyMap()
   1.133 -    /// with such a map in which \c k is assigned to \c s, \c -k is
   1.134 +    /// with a map in which \c k is assigned to \c s, \c -k is
   1.135      /// assigned to \c t and all other nodes have zero supply value.
   1.136      ///
   1.137      /// \param s The source node.
   1.138 @@ -493,7 +492,7 @@
   1.139      ///
   1.140      /// \param method The internal method that will be used in the
   1.141      /// algorithm. For more information, see \ref Method.
   1.142 -    /// \param factor The cost scaling factor. It must be larger than one.
   1.143 +    /// \param factor The cost scaling factor. It must be at least two.
   1.144      ///
   1.145      /// \return \c INFEASIBLE if no feasible flow exists,
   1.146      /// \n \c OPTIMAL if the problem has optimal solution
   1.147 @@ -507,7 +506,8 @@
   1.148      ///
   1.149      /// \see ProblemType, Method
   1.150      /// \see resetParams(), reset()
   1.151 -    ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 8) {
   1.152 +    ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 16) {
   1.153 +      LEMON_ASSERT(factor >= 2, "The scaling factor must be at least 2");
   1.154        _alpha = factor;
   1.155        ProblemType pt = init();
   1.156        if (pt != OPTIMAL) return pt;
   1.157 @@ -567,22 +567,29 @@
   1.158          _scost[j] = 0;
   1.159          _scost[_reverse[j]] = 0;
   1.160        }
   1.161 -      _have_lower = false;
   1.162 +      _has_lower = false;
   1.163        return *this;
   1.164      }
   1.165  
   1.166 -    /// \brief Reset all the parameters that have been given before.
   1.167 +    /// \brief Reset the internal data structures and all the parameters
   1.168 +    /// that have been given before.
   1.169      ///
   1.170 -    /// This function resets all the paramaters that have been given
   1.171 -    /// before using functions \ref lowerMap(), \ref upperMap(),
   1.172 -    /// \ref costMap(), \ref supplyMap(), \ref stSupply().
   1.173 +    /// This function resets the internal data structures and all the
    1.174 +    /// parameters that have been given before using functions \ref lowerMap(),
   1.175 +    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply().
   1.176      ///
   1.177 -    /// It is useful for multiple run() calls. If this function is not
   1.178 -    /// used, all the parameters given before are kept for the next
   1.179 -    /// \ref run() call.
   1.180 -    /// However, the underlying digraph must not be modified after this
   1.181 -    /// class have been constructed, since it copies and extends the graph.
   1.182 +    /// It is useful for multiple \ref run() calls. By default, all the given
   1.183 +    /// parameters are kept for the next \ref run() call, unless
   1.184 +    /// \ref resetParams() or \ref reset() is used.
   1.185 +    /// If the underlying digraph was also modified after the construction
   1.186 +    /// of the class or the last \ref reset() call, then the \ref reset()
   1.187 +    /// function must be used, otherwise \ref resetParams() is sufficient.
   1.188 +    ///
   1.189 +    /// See \ref resetParams() for examples.
   1.190 +    ///
   1.191      /// \return <tt>(*this)</tt>
   1.192 +    ///
   1.193 +    /// \see resetParams(), run()
   1.194      CostScaling& reset() {
   1.195        // Resize vectors
   1.196        _node_num = countNodes(_graph);
   1.197 @@ -608,9 +615,6 @@
   1.198        _excess.resize(_res_node_num);
   1.199        _next_out.resize(_res_node_num);
   1.200  
   1.201 -      _arc_vec.reserve(_res_arc_num);
   1.202 -      _cost_vec.reserve(_res_arc_num);
   1.203 -
   1.204        // Copy the graph
   1.205        int i = 0, j = 0, k = 2 * _arc_num + _node_num;
   1.206        for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
   1.207 @@ -667,7 +671,7 @@
   1.208      /// \brief Return the total cost of the found flow.
   1.209      ///
   1.210      /// This function returns the total cost of the found flow.
   1.211 -    /// Its complexity is O(e).
   1.212 +    /// Its complexity is O(m).
   1.213      ///
   1.214      /// \note The return type of the function can be specified as a
   1.215      /// template parameter. For example,
   1.216 @@ -705,7 +709,8 @@
   1.217        return _res_cap[_arc_idb[a]];
   1.218      }
   1.219  
   1.220 -    /// \brief Return the flow map (the primal solution).
   1.221 +    /// \brief Copy the flow values (the primal solution) into the
   1.222 +    /// given map.
   1.223      ///
   1.224      /// This function copies the flow value on each arc into the given
   1.225      /// map. The \c Value type of the algorithm must be convertible to
   1.226 @@ -729,7 +734,8 @@
   1.227        return static_cast<Cost>(_pi[_node_id[n]]);
   1.228      }
   1.229  
   1.230 -    /// \brief Return the potential map (the dual solution).
   1.231 +    /// \brief Copy the potential values (the dual solution) into the
   1.232 +    /// given map.
   1.233      ///
   1.234      /// This function copies the potential (dual value) of each node
   1.235      /// into the given map.
   1.236 @@ -759,6 +765,10 @@
   1.237        }
   1.238        if (_sum_supply > 0) return INFEASIBLE;
   1.239  
   1.240 +      // Check lower and upper bounds
   1.241 +      LEMON_DEBUG(checkBoundMaps(),
   1.242 +          "Upper bounds must be greater or equal to the lower bounds");
   1.243 +
   1.244  
   1.245        // Initialize vectors
   1.246        for (int i = 0; i != _res_node_num; ++i) {
   1.247 @@ -769,7 +779,7 @@
   1.248        // Remove infinite upper bounds and check negative arcs
   1.249        const Value MAX = std::numeric_limits<Value>::max();
   1.250        int last_out;
   1.251 -      if (_have_lower) {
   1.252 +      if (_has_lower) {
   1.253          for (int i = 0; i != _root; ++i) {
   1.254            last_out = _first_out[i+1];
   1.255            for (int j = _first_out[i]; j != last_out; ++j) {
   1.256 @@ -826,7 +836,7 @@
   1.257        for (NodeIt n(_graph); n != INVALID; ++n) {
   1.258          sup[n] = _supply[_node_id[n]];
   1.259        }
   1.260 -      if (_have_lower) {
   1.261 +      if (_has_lower) {
   1.262          for (ArcIt a(_graph); a != INVALID; ++a) {
   1.263            int j = _arc_idf[a];
   1.264            Value c = _lower[j];
   1.265 @@ -886,14 +896,6 @@
   1.266          }
   1.267        }
   1.268  
   1.269 -      return OPTIMAL;
   1.270 -    }
   1.271 -
   1.272 -    // Execute the algorithm and transform the results
   1.273 -    void start(Method method) {
   1.274 -      // Maximum path length for partial augment
   1.275 -      const int MAX_PATH_LENGTH = 4;
   1.276 -
   1.277        // Initialize data structures for buckets
   1.278        _max_rank = _alpha * _res_node_num;
   1.279        _buckets.resize(_max_rank);
   1.280 @@ -901,7 +903,22 @@
   1.281        _bucket_prev.resize(_res_node_num + 1);
   1.282        _rank.resize(_res_node_num + 1);
   1.283  
   1.284 -      // Execute the algorithm
   1.285 +      return OPTIMAL;
   1.286 +    }
   1.287 +
   1.288 +    // Check if the upper bound is greater than or equal to the lower bound
   1.289 +    // on each forward arc.
   1.290 +    bool checkBoundMaps() {
   1.291 +      for (int j = 0; j != _res_arc_num; ++j) {
   1.292 +        if (_forward[j] && _upper[j] < _lower[j]) return false;
   1.293 +      }
   1.294 +      return true;
   1.295 +    }
   1.296 +
   1.297 +    // Execute the algorithm and transform the results
   1.298 +    void start(Method method) {
   1.299 +      const int MAX_PARTIAL_PATH_LENGTH = 4;
   1.300 +
   1.301        switch (method) {
   1.302          case PUSH:
   1.303            startPush();
   1.304 @@ -910,32 +927,73 @@
   1.305            startAugment(_res_node_num - 1);
   1.306            break;
   1.307          case PARTIAL_AUGMENT:
   1.308 -          startAugment(MAX_PATH_LENGTH);
   1.309 +          startAugment(MAX_PARTIAL_PATH_LENGTH);
   1.310            break;
   1.311        }
   1.312  
   1.313 -      // Compute node potentials for the original costs
   1.314 -      _arc_vec.clear();
   1.315 -      _cost_vec.clear();
   1.316 -      for (int j = 0; j != _res_arc_num; ++j) {
   1.317 -        if (_res_cap[j] > 0) {
   1.318 -          _arc_vec.push_back(IntPair(_source[j], _target[j]));
   1.319 -          _cost_vec.push_back(_scost[j]);
   1.320 +      // Compute node potentials (dual solution)
   1.321 +      for (int i = 0; i != _res_node_num; ++i) {
   1.322 +        _pi[i] = static_cast<Cost>(_pi[i] / (_res_node_num * _alpha));
   1.323 +      }
   1.324 +      bool optimal = true;
   1.325 +      for (int i = 0; optimal && i != _res_node_num; ++i) {
   1.326 +        LargeCost pi_i = _pi[i];
   1.327 +        int last_out = _first_out[i+1];
   1.328 +        for (int j = _first_out[i]; j != last_out; ++j) {
   1.329 +          if (_res_cap[j] > 0 && _scost[j] + pi_i - _pi[_target[j]] < 0) {
   1.330 +            optimal = false;
   1.331 +            break;
   1.332 +          }
   1.333          }
   1.334        }
   1.335 -      _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
   1.336  
   1.337 -      typename BellmanFord<StaticDigraph, LargeCostArcMap>
   1.338 -        ::template SetDistMap<LargeCostNodeMap>::Create bf(_sgr, _cost_map);
   1.339 -      bf.distMap(_pi_map);
   1.340 -      bf.init(0);
   1.341 -      bf.start();
   1.342 +      if (!optimal) {
   1.343 +        // Compute node potentials for the original costs with BellmanFord
   1.344 +        // (if it is necessary)
   1.345 +        typedef std::pair<int, int> IntPair;
   1.346 +        StaticDigraph sgr;
   1.347 +        std::vector<IntPair> arc_vec;
   1.348 +        std::vector<LargeCost> cost_vec;
   1.349 +        LargeCostArcMap cost_map(cost_vec);
   1.350 +
   1.351 +        arc_vec.clear();
   1.352 +        cost_vec.clear();
   1.353 +        for (int j = 0; j != _res_arc_num; ++j) {
   1.354 +          if (_res_cap[j] > 0) {
   1.355 +            int u = _source[j], v = _target[j];
   1.356 +            arc_vec.push_back(IntPair(u, v));
   1.357 +            cost_vec.push_back(_scost[j] + _pi[u] - _pi[v]);
   1.358 +          }
   1.359 +        }
   1.360 +        sgr.build(_res_node_num, arc_vec.begin(), arc_vec.end());
   1.361 +
   1.362 +        typename BellmanFord<StaticDigraph, LargeCostArcMap>::Create
   1.363 +          bf(sgr, cost_map);
   1.364 +        bf.init(0);
   1.365 +        bf.start();
   1.366 +
   1.367 +        for (int i = 0; i != _res_node_num; ++i) {
   1.368 +          _pi[i] += bf.dist(sgr.node(i));
   1.369 +        }
   1.370 +      }
   1.371 +
   1.372 +      // Shift potentials to meet the requirements of the GEQ type
   1.373 +      // optimality conditions
   1.374 +      LargeCost max_pot = _pi[_root];
   1.375 +      for (int i = 0; i != _res_node_num; ++i) {
   1.376 +        if (_pi[i] > max_pot) max_pot = _pi[i];
   1.377 +      }
   1.378 +      if (max_pot != 0) {
   1.379 +        for (int i = 0; i != _res_node_num; ++i) {
   1.380 +          _pi[i] -= max_pot;
   1.381 +        }
   1.382 +      }
   1.383  
   1.384        // Handle non-zero lower bounds
   1.385 -      if (_have_lower) {
   1.386 +      if (_has_lower) {
   1.387          int limit = _first_out[_root];
   1.388          for (int j = 0; j != limit; ++j) {
   1.389 -          if (!_forward[j]) _res_cap[j] += _lower[j];
   1.390 +          if (_forward[j]) _res_cap[_reverse[j]] += _lower[j];
   1.391          }
   1.392        }
   1.393      }
   1.394 @@ -947,13 +1005,15 @@
   1.395          int last_out = _first_out[u+1];
   1.396          LargeCost pi_u = _pi[u];
   1.397          for (int a = _first_out[u]; a != last_out; ++a) {
   1.398 -          int v = _target[a];
   1.399 -          if (_res_cap[a] > 0 && _cost[a] + pi_u - _pi[v] < 0) {
   1.400 -            Value delta = _res_cap[a];
   1.401 -            _excess[u] -= delta;
   1.402 -            _excess[v] += delta;
   1.403 -            _res_cap[a] = 0;
   1.404 -            _res_cap[_reverse[a]] += delta;
   1.405 +          Value delta = _res_cap[a];
   1.406 +          if (delta > 0) {
   1.407 +            int v = _target[a];
   1.408 +            if (_cost[a] + pi_u - _pi[v] < 0) {
   1.409 +              _excess[u] -= delta;
   1.410 +              _excess[v] += delta;
   1.411 +              _res_cap[a] = 0;
   1.412 +              _res_cap[_reverse[a]] += delta;
   1.413 +            }
   1.414            }
   1.415          }
   1.416        }
   1.417 @@ -969,53 +1029,254 @@
   1.418        }
   1.419      }
   1.420  
   1.421 -    // Early termination heuristic
   1.422 -    bool earlyTermination() {
   1.423 -      const double EARLY_TERM_FACTOR = 3.0;
   1.424 +    // Price (potential) refinement heuristic
   1.425 +    bool priceRefinement() {
   1.426  
   1.427 -      // Build a static residual graph
   1.428 -      _arc_vec.clear();
   1.429 -      _cost_vec.clear();
   1.430 -      for (int j = 0; j != _res_arc_num; ++j) {
   1.431 -        if (_res_cap[j] > 0) {
   1.432 -          _arc_vec.push_back(IntPair(_source[j], _target[j]));
   1.433 -          _cost_vec.push_back(_cost[j] + 1);
    1.434 +      // Stack for storing the topological order
   1.435 +      IntVector stack(_res_node_num);
   1.436 +      int stack_top;
   1.437 +
   1.438 +      // Perform phases
   1.439 +      while (topologicalSort(stack, stack_top)) {
   1.440 +
   1.441 +        // Compute node ranks in the acyclic admissible network and
   1.442 +        // store the nodes in buckets
   1.443 +        for (int i = 0; i != _res_node_num; ++i) {
   1.444 +          _rank[i] = 0;
   1.445          }
   1.446 +        const int bucket_end = _root + 1;
   1.447 +        for (int r = 0; r != _max_rank; ++r) {
   1.448 +          _buckets[r] = bucket_end;
   1.449 +        }
   1.450 +        int top_rank = 0;
   1.451 +        for ( ; stack_top >= 0; --stack_top) {
   1.452 +          int u = stack[stack_top], v;
   1.453 +          int rank_u = _rank[u];
   1.454 +
   1.455 +          LargeCost rc, pi_u = _pi[u];
   1.456 +          int last_out = _first_out[u+1];
   1.457 +          for (int a = _first_out[u]; a != last_out; ++a) {
   1.458 +            if (_res_cap[a] > 0) {
   1.459 +              v = _target[a];
   1.460 +              rc = _cost[a] + pi_u - _pi[v];
   1.461 +              if (rc < 0) {
   1.462 +                LargeCost nrc = static_cast<LargeCost>((-rc - 0.5) / _epsilon);
   1.463 +                if (nrc < LargeCost(_max_rank)) {
   1.464 +                  int new_rank_v = rank_u + static_cast<int>(nrc);
   1.465 +                  if (new_rank_v > _rank[v]) {
   1.466 +                    _rank[v] = new_rank_v;
   1.467 +                  }
   1.468 +                }
   1.469 +              }
   1.470 +            }
   1.471 +          }
   1.472 +
   1.473 +          if (rank_u > 0) {
   1.474 +            top_rank = std::max(top_rank, rank_u);
   1.475 +            int bfirst = _buckets[rank_u];
   1.476 +            _bucket_next[u] = bfirst;
   1.477 +            _bucket_prev[bfirst] = u;
   1.478 +            _buckets[rank_u] = u;
   1.479 +          }
   1.480 +        }
   1.481 +
   1.482 +        // Check if the current flow is epsilon-optimal
   1.483 +        if (top_rank == 0) {
   1.484 +          return true;
   1.485 +        }
   1.486 +
   1.487 +        // Process buckets in top-down order
   1.488 +        for (int rank = top_rank; rank > 0; --rank) {
   1.489 +          while (_buckets[rank] != bucket_end) {
   1.490 +            // Remove the first node from the current bucket
   1.491 +            int u = _buckets[rank];
   1.492 +            _buckets[rank] = _bucket_next[u];
   1.493 +
   1.494 +            // Search the outgoing arcs of u
   1.495 +            LargeCost rc, pi_u = _pi[u];
   1.496 +            int last_out = _first_out[u+1];
   1.497 +            int v, old_rank_v, new_rank_v;
   1.498 +            for (int a = _first_out[u]; a != last_out; ++a) {
   1.499 +              if (_res_cap[a] > 0) {
   1.500 +                v = _target[a];
   1.501 +                old_rank_v = _rank[v];
   1.502 +
   1.503 +                if (old_rank_v < rank) {
   1.504 +
   1.505 +                  // Compute the new rank of node v
   1.506 +                  rc = _cost[a] + pi_u - _pi[v];
   1.507 +                  if (rc < 0) {
   1.508 +                    new_rank_v = rank;
   1.509 +                  } else {
   1.510 +                    LargeCost nrc = rc / _epsilon;
   1.511 +                    new_rank_v = 0;
   1.512 +                    if (nrc < LargeCost(_max_rank)) {
   1.513 +                      new_rank_v = rank - 1 - static_cast<int>(nrc);
   1.514 +                    }
   1.515 +                  }
   1.516 +
   1.517 +                  // Change the rank of node v
   1.518 +                  if (new_rank_v > old_rank_v) {
   1.519 +                    _rank[v] = new_rank_v;
   1.520 +
   1.521 +                    // Remove v from its old bucket
   1.522 +                    if (old_rank_v > 0) {
   1.523 +                      if (_buckets[old_rank_v] == v) {
   1.524 +                        _buckets[old_rank_v] = _bucket_next[v];
   1.525 +                      } else {
   1.526 +                        int pv = _bucket_prev[v], nv = _bucket_next[v];
   1.527 +                        _bucket_next[pv] = nv;
   1.528 +                        _bucket_prev[nv] = pv;
   1.529 +                      }
   1.530 +                    }
   1.531 +
   1.532 +                    // Insert v into its new bucket
   1.533 +                    int nv = _buckets[new_rank_v];
   1.534 +                    _bucket_next[v] = nv;
   1.535 +                    _bucket_prev[nv] = v;
   1.536 +                    _buckets[new_rank_v] = v;
   1.537 +                  }
   1.538 +                }
   1.539 +              }
   1.540 +            }
   1.541 +
   1.542 +            // Refine potential of node u
   1.543 +            _pi[u] -= rank * _epsilon;
   1.544 +          }
   1.545 +        }
   1.546 +
   1.547        }
   1.548 -      _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
   1.549  
   1.550 -      // Run Bellman-Ford algorithm to check if the current flow is optimal
   1.551 -      BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
   1.552 -      bf.init(0);
   1.553 -      bool done = false;
   1.554 -      int K = int(EARLY_TERM_FACTOR * std::sqrt(double(_res_node_num)));
   1.555 -      for (int i = 0; i < K && !done; ++i) {
   1.556 -        done = bf.processNextWeakRound();
   1.557 +      return false;
   1.558 +    }
   1.559 +
   1.560 +    // Find and cancel cycles in the admissible network and
   1.561 +    // determine topological order using DFS
   1.562 +    bool topologicalSort(IntVector &stack, int &stack_top) {
   1.563 +      const int MAX_CYCLE_CANCEL = 1;
   1.564 +
   1.565 +      BoolVector reached(_res_node_num, false);
   1.566 +      BoolVector processed(_res_node_num, false);
   1.567 +      IntVector pred(_res_node_num);
   1.568 +      for (int i = 0; i != _res_node_num; ++i) {
   1.569 +        _next_out[i] = _first_out[i];
   1.570        }
   1.571 -      return done;
   1.572 +      stack_top = -1;
   1.573 +
   1.574 +      int cycle_cnt = 0;
   1.575 +      for (int start = 0; start != _res_node_num; ++start) {
   1.576 +        if (reached[start]) continue;
   1.577 +
   1.578 +        // Start DFS search from this start node
   1.579 +        pred[start] = -1;
   1.580 +        int tip = start, v;
   1.581 +        while (true) {
   1.582 +          // Check the outgoing arcs of the current tip node
   1.583 +          reached[tip] = true;
   1.584 +          LargeCost pi_tip = _pi[tip];
   1.585 +          int a, last_out = _first_out[tip+1];
   1.586 +          for (a = _next_out[tip]; a != last_out; ++a) {
   1.587 +            if (_res_cap[a] > 0) {
   1.588 +              v = _target[a];
   1.589 +              if (_cost[a] + pi_tip - _pi[v] < 0) {
   1.590 +                if (!reached[v]) {
   1.591 +                  // A new node is reached
   1.592 +                  reached[v] = true;
   1.593 +                  pred[v] = tip;
   1.594 +                  _next_out[tip] = a;
   1.595 +                  tip = v;
   1.596 +                  a = _next_out[tip];
   1.597 +                  last_out = _first_out[tip+1];
   1.598 +                  break;
   1.599 +                }
   1.600 +                else if (!processed[v]) {
   1.601 +                  // A cycle is found
   1.602 +                  ++cycle_cnt;
   1.603 +                  _next_out[tip] = a;
   1.604 +
   1.605 +                  // Find the minimum residual capacity along the cycle
   1.606 +                  Value d, delta = _res_cap[a];
   1.607 +                  int u, delta_node = tip;
   1.608 +                  for (u = tip; u != v; ) {
   1.609 +                    u = pred[u];
   1.610 +                    d = _res_cap[_next_out[u]];
   1.611 +                    if (d <= delta) {
   1.612 +                      delta = d;
   1.613 +                      delta_node = u;
   1.614 +                    }
   1.615 +                  }
   1.616 +
   1.617 +                  // Augment along the cycle
   1.618 +                  _res_cap[a] -= delta;
   1.619 +                  _res_cap[_reverse[a]] += delta;
   1.620 +                  for (u = tip; u != v; ) {
   1.621 +                    u = pred[u];
   1.622 +                    int ca = _next_out[u];
   1.623 +                    _res_cap[ca] -= delta;
   1.624 +                    _res_cap[_reverse[ca]] += delta;
   1.625 +                  }
   1.626 +
   1.627 +                  // Check the maximum number of cycle canceling
   1.628 +                  if (cycle_cnt >= MAX_CYCLE_CANCEL) {
   1.629 +                    return false;
   1.630 +                  }
   1.631 +
   1.632 +                  // Roll back search to delta_node
   1.633 +                  if (delta_node != tip) {
   1.634 +                    for (u = tip; u != delta_node; u = pred[u]) {
   1.635 +                      reached[u] = false;
   1.636 +                    }
   1.637 +                    tip = delta_node;
   1.638 +                    a = _next_out[tip] + 1;
   1.639 +                    last_out = _first_out[tip+1];
   1.640 +                    break;
   1.641 +                  }
   1.642 +                }
   1.643 +              }
   1.644 +            }
   1.645 +          }
   1.646 +
   1.647 +          // Step back to the previous node
   1.648 +          if (a == last_out) {
   1.649 +            processed[tip] = true;
   1.650 +            stack[++stack_top] = tip;
   1.651 +            tip = pred[tip];
   1.652 +            if (tip < 0) {
   1.653 +              // Finish DFS from the current start node
   1.654 +              break;
   1.655 +            }
   1.656 +            ++_next_out[tip];
   1.657 +          }
   1.658 +        }
   1.659 +
   1.660 +      }
   1.661 +
   1.662 +      return (cycle_cnt == 0);
   1.663      }
   1.664  
   1.665      // Global potential update heuristic
   1.666      void globalUpdate() {
   1.667 -      int bucket_end = _root + 1;
   1.668 +      const int bucket_end = _root + 1;
   1.669  
   1.670        // Initialize buckets
   1.671        for (int r = 0; r != _max_rank; ++r) {
   1.672          _buckets[r] = bucket_end;
   1.673        }
   1.674        Value total_excess = 0;
   1.675 +      int b0 = bucket_end;
   1.676        for (int i = 0; i != _res_node_num; ++i) {
   1.677          if (_excess[i] < 0) {
   1.678            _rank[i] = 0;
   1.679 -          _bucket_next[i] = _buckets[0];
   1.680 -          _bucket_prev[_buckets[0]] = i;
   1.681 -          _buckets[0] = i;
   1.682 +          _bucket_next[i] = b0;
   1.683 +          _bucket_prev[b0] = i;
   1.684 +          b0 = i;
   1.685          } else {
   1.686            total_excess += _excess[i];
   1.687            _rank[i] = _max_rank;
   1.688          }
   1.689        }
   1.690        if (total_excess == 0) return;
   1.691 +      _buckets[0] = b0;
   1.692  
   1.693        // Search the buckets
   1.694        int r = 0;
   1.695 @@ -1025,7 +1286,7 @@
   1.696            int u = _buckets[r];
   1.697            _buckets[r] = _bucket_next[u];
   1.698  
   1.699 -          // Search the incomming arcs of u
   1.700 +          // Search the incoming arcs of u
   1.701            LargeCost pi_u = _pi[u];
   1.702            int last_out = _first_out[u+1];
   1.703            for (int a = _first_out[u]; a != last_out; ++a) {
   1.704 @@ -1037,8 +1298,9 @@
   1.705                  // Compute the new rank of v
   1.706                  LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
   1.707                  int new_rank_v = old_rank_v;
   1.708 -                if (nrc < LargeCost(_max_rank))
   1.709 -                  new_rank_v = r + 1 + int(nrc);
   1.710 +                if (nrc < LargeCost(_max_rank)) {
   1.711 +                  new_rank_v = r + 1 + static_cast<int>(nrc);
   1.712 +                }
   1.713  
   1.714                  // Change the rank of v
   1.715                  if (new_rank_v < old_rank_v) {
   1.716 @@ -1050,14 +1312,16 @@
   1.717                      if (_buckets[old_rank_v] == v) {
   1.718                        _buckets[old_rank_v] = _bucket_next[v];
   1.719                      } else {
   1.720 -                      _bucket_next[_bucket_prev[v]] = _bucket_next[v];
   1.721 -                      _bucket_prev[_bucket_next[v]] = _bucket_prev[v];
   1.722 +                      int pv = _bucket_prev[v], nv = _bucket_next[v];
   1.723 +                      _bucket_next[pv] = nv;
   1.724 +                      _bucket_prev[nv] = pv;
   1.725                      }
   1.726                    }
   1.727  
   1.728 -                  // Insert v to its new bucket
   1.729 -                  _bucket_next[v] = _buckets[new_rank_v];
   1.730 -                  _bucket_prev[_buckets[new_rank_v]] = v;
   1.731 +                  // Insert v into its new bucket
   1.732 +                  int nv = _buckets[new_rank_v];
   1.733 +                  _bucket_next[v] = nv;
   1.734 +                  _bucket_prev[nv] = v;
   1.735                    _buckets[new_rank_v] = v;
   1.736                  }
   1.737                }
   1.738 @@ -1086,23 +1350,25 @@
   1.739      /// Execute the algorithm performing augment and relabel operations
   1.740      void startAugment(int max_length) {
   1.741        // Paramters for heuristics
   1.742 -      const int EARLY_TERM_EPSILON_LIMIT = 1000;
   1.743 -      const double GLOBAL_UPDATE_FACTOR = 3.0;
   1.744 -
   1.745 -      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
   1.746 +      const int PRICE_REFINEMENT_LIMIT = 2;
   1.747 +      const double GLOBAL_UPDATE_FACTOR = 1.0;
   1.748 +      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
   1.749          (_res_node_num + _sup_node_num * _sup_node_num));
   1.750 -      int next_update_limit = global_update_freq;
   1.751 -
   1.752 -      int relabel_cnt = 0;
   1.753 +      int next_global_update_limit = global_update_skip;
   1.754  
   1.755        // Perform cost scaling phases
   1.756 -      std::vector<int> path;
   1.757 +      IntVector path;
   1.758 +      BoolVector path_arc(_res_arc_num, false);
   1.759 +      int relabel_cnt = 0;
   1.760 +      int eps_phase_cnt = 0;
   1.761        for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
   1.762                                          1 : _epsilon / _alpha )
   1.763        {
   1.764 -        // Early termination heuristic
   1.765 -        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
   1.766 -          if (earlyTermination()) break;
   1.767 +        ++eps_phase_cnt;
   1.768 +
   1.769 +        // Price refinement heuristic
   1.770 +        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
   1.771 +          if (priceRefinement()) continue;
   1.772          }
   1.773  
   1.774          // Initialize current phase
   1.775 @@ -1119,32 +1385,45 @@
   1.776            int start = _active_nodes.front();
   1.777  
   1.778            // Find an augmenting path from the start node
   1.779 -          path.clear();
   1.780            int tip = start;
   1.781 -          while (_excess[tip] >= 0 && int(path.size()) < max_length) {
   1.782 +          while (int(path.size()) < max_length && _excess[tip] >= 0) {
   1.783              int u;
   1.784 -            LargeCost min_red_cost, rc, pi_tip = _pi[tip];
   1.785 +            LargeCost rc, min_red_cost = std::numeric_limits<LargeCost>::max();
   1.786 +            LargeCost pi_tip = _pi[tip];
   1.787              int last_out = _first_out[tip+1];
   1.788              for (int a = _next_out[tip]; a != last_out; ++a) {
   1.789 -              u = _target[a];
   1.790 -              if (_res_cap[a] > 0 && _cost[a] + pi_tip - _pi[u] < 0) {
   1.791 -                path.push_back(a);
   1.792 -                _next_out[tip] = a;
   1.793 -                tip = u;
   1.794 -                goto next_step;
   1.795 +              if (_res_cap[a] > 0) {
   1.796 +                u = _target[a];
   1.797 +                rc = _cost[a] + pi_tip - _pi[u];
   1.798 +                if (rc < 0) {
   1.799 +                  path.push_back(a);
   1.800 +                  _next_out[tip] = a;
   1.801 +                  if (path_arc[a]) {
   1.802 +                    goto augment;   // a cycle is found, stop path search
   1.803 +                  }
   1.804 +                  tip = u;
   1.805 +                  path_arc[a] = true;
   1.806 +                  goto next_step;
   1.807 +                }
   1.808 +                else if (rc < min_red_cost) {
   1.809 +                  min_red_cost = rc;
   1.810 +                }
   1.811                }
   1.812              }
   1.813  
   1.814              // Relabel tip node
   1.815 -            min_red_cost = std::numeric_limits<LargeCost>::max();
   1.816              if (tip != start) {
   1.817                int ra = _reverse[path.back()];
   1.818 -              min_red_cost = _cost[ra] + pi_tip - _pi[_target[ra]];
   1.819 +              min_red_cost =
   1.820 +                std::min(min_red_cost, _cost[ra] + pi_tip - _pi[_target[ra]]);
   1.821              }
   1.822 +            last_out = _next_out[tip];
   1.823              for (int a = _first_out[tip]; a != last_out; ++a) {
   1.824 -              rc = _cost[a] + pi_tip - _pi[_target[a]];
   1.825 -              if (_res_cap[a] > 0 && rc < min_red_cost) {
   1.826 -                min_red_cost = rc;
   1.827 +              if (_res_cap[a] > 0) {
   1.828 +                rc = _cost[a] + pi_tip - _pi[_target[a]];
   1.829 +                if (rc < min_red_cost) {
   1.830 +                  min_red_cost = rc;
   1.831 +                }
   1.832                }
   1.833              }
   1.834              _pi[tip] -= min_red_cost + _epsilon;
   1.835 @@ -1153,7 +1432,9 @@
   1.836  
   1.837              // Step back
   1.838              if (tip != start) {
   1.839 -              tip = _source[path.back()];
   1.840 +              int pa = path.back();
   1.841 +              path_arc[pa] = false;
   1.842 +              tip = _source[pa];
   1.843                path.pop_back();
   1.844              }
   1.845  
   1.846 @@ -1161,51 +1442,59 @@
   1.847            }
   1.848  
   1.849            // Augment along the found path (as much flow as possible)
   1.850 +        augment:
   1.851            Value delta;
   1.852            int pa, u, v = start;
   1.853            for (int i = 0; i != int(path.size()); ++i) {
   1.854              pa = path[i];
   1.855              u = v;
   1.856              v = _target[pa];
   1.857 +            path_arc[pa] = false;
   1.858              delta = std::min(_res_cap[pa], _excess[u]);
   1.859              _res_cap[pa] -= delta;
   1.860              _res_cap[_reverse[pa]] += delta;
   1.861              _excess[u] -= delta;
   1.862              _excess[v] += delta;
   1.863 -            if (_excess[v] > 0 && _excess[v] <= delta)
   1.864 +            if (_excess[v] > 0 && _excess[v] <= delta) {
   1.865                _active_nodes.push_back(v);
   1.866 +            }
   1.867            }
   1.868 +          path.clear();
   1.869  
   1.870            // Global update heuristic
   1.871 -          if (relabel_cnt >= next_update_limit) {
   1.872 +          if (relabel_cnt >= next_global_update_limit) {
   1.873              globalUpdate();
   1.874 -            next_update_limit += global_update_freq;
   1.875 +            next_global_update_limit += global_update_skip;
   1.876            }
   1.877          }
   1.878 +
   1.879        }
   1.880 +
   1.881      }
   1.882  
   1.883      /// Execute the algorithm performing push and relabel operations
   1.884      void startPush() {
   1.885        // Paramters for heuristics
   1.886 -      const int EARLY_TERM_EPSILON_LIMIT = 1000;
   1.887 +      const int PRICE_REFINEMENT_LIMIT = 2;
   1.888        const double GLOBAL_UPDATE_FACTOR = 2.0;
   1.889  
   1.890 -      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
   1.891 +      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
   1.892          (_res_node_num + _sup_node_num * _sup_node_num));
   1.893 -      int next_update_limit = global_update_freq;
   1.894 -
   1.895 -      int relabel_cnt = 0;
   1.896 +      int next_global_update_limit = global_update_skip;
   1.897  
   1.898        // Perform cost scaling phases
   1.899        BoolVector hyper(_res_node_num, false);
   1.900        LargeCostVector hyper_cost(_res_node_num);
   1.901 +      int relabel_cnt = 0;
   1.902 +      int eps_phase_cnt = 0;
   1.903        for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
   1.904                                          1 : _epsilon / _alpha )
   1.905        {
   1.906 -        // Early termination heuristic
   1.907 -        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
   1.908 -          if (earlyTermination()) break;
   1.909 +        ++eps_phase_cnt;
   1.910 +
   1.911 +        // Price refinement heuristic
   1.912 +        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
   1.913 +          if (priceRefinement()) continue;
   1.914          }
   1.915  
   1.916          // Initialize current phase
   1.917 @@ -1277,9 +1566,11 @@
   1.918               min_red_cost = hyper[n] ? -hyper_cost[n] :
   1.919                 std::numeric_limits<LargeCost>::max();
   1.920              for (int a = _first_out[n]; a != last_out; ++a) {
   1.921 -              rc = _cost[a] + pi_n - _pi[_target[a]];
   1.922 -              if (_res_cap[a] > 0 && rc < min_red_cost) {
   1.923 -                min_red_cost = rc;
   1.924 +              if (_res_cap[a] > 0) {
   1.925 +                rc = _cost[a] + pi_n - _pi[_target[a]];
   1.926 +                if (rc < min_red_cost) {
   1.927 +                  min_red_cost = rc;
   1.928 +                }
   1.929                }
   1.930              }
   1.931              _pi[n] -= min_red_cost + _epsilon;
   1.932 @@ -1297,11 +1588,11 @@
   1.933            }
   1.934  
   1.935            // Global update heuristic
   1.936 -          if (relabel_cnt >= next_update_limit) {
   1.937 +          if (relabel_cnt >= next_global_update_limit) {
   1.938              globalUpdate();
   1.939              for (int u = 0; u != _res_node_num; ++u)
   1.940                hyper[u] = false;
   1.941 -            next_update_limit += global_update_freq;
   1.942 +            next_global_update_limit += global_update_skip;
   1.943            }
   1.944          }
   1.945        }