# HG changeset patch
# User Peter Kovacs
# Date 2009-02-24 09:46:02
# Node ID e8349c6f12ca30c0a54b36825969cdaca81f933c
# Parent  6a17a722b50e1b22187f5a4cb99a7de28aada418
Port NetworkSimplex from SVN -r3520 (#234)

diff --git a/lemon/Makefile.am b/lemon/Makefile.am
--- a/lemon/Makefile.am
+++ b/lemon/Makefile.am
@@ -85,6 +85,7 @@
 	lemon/max_matching.h \
 	lemon/min_cost_arborescence.h \
 	lemon/nauty_reader.h \
+	lemon/network_simplex.h \
 	lemon/path.h \
 	lemon/preflow.h \
 	lemon/radix_sort.h \
diff --git a/lemon/network_simplex.h b/lemon/network_simplex.h
new file mode 100644
--- /dev/null
+++ b/lemon/network_simplex.h
@@ -0,0 +1,1191 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2009
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_NETWORK_SIMPLEX_H
+#define LEMON_NETWORK_SIMPLEX_H
+
+/// \ingroup min_cost_flow
+///
+/// \file
+/// \brief Network simplex algorithm for finding a minimum cost flow.
+
+#include <vector>
+#include <limits>
+#include <cmath>
+
+#include <lemon/core.h>
+
+namespace lemon {
+
+  /// \addtogroup min_cost_flow
+  /// @{
+
+  /// \brief Implementation of the primal network simplex algorithm
+  /// for finding a \ref min_cost_flow "minimum cost flow".
+  ///
+  /// \ref NetworkSimplex implements the primal network simplex algorithm
+  /// for finding a \ref min_cost_flow "minimum cost flow".
+  ///
+  /// \tparam Digraph The digraph type the algorithm runs on.
+  /// \tparam LowerMap The type of the lower bound map.
+  /// \tparam CapacityMap The type of the capacity (upper bound) map.
+  /// \tparam CostMap The type of the cost (length) map.
+  /// \tparam SupplyMap The type of the supply map.
+  ///
+  /// \warning
+  /// - Arc capacities and costs should be \e non-negative \e integers.
+  /// - Supply values should be \e signed \e integers.
+  /// - The value types of the maps should be convertible to each other.
+  /// - \c CostMap::Value must be a signed type.
+  ///
+  /// \note \ref NetworkSimplex provides five different pivot rule
+  /// implementations that significantly affect the efficiency of the
+  /// algorithm.
+  /// By default the "Block Search" pivot rule is used, which proved to be
+  /// by far the most efficient according to our benchmark tests.
+  /// However, another pivot rule can be selected by calling the \ref run()
+  /// function with the proper parameter.
+#ifdef DOXYGEN
+  template < typename Digraph,
+             typename LowerMap,
+             typename CapacityMap,
+             typename CostMap,
+             typename SupplyMap >
+
+#else
+  template < typename Digraph,
+             typename LowerMap = typename Digraph::template ArcMap<int>,
+             typename CapacityMap = typename Digraph::template ArcMap<int>,
+             typename CostMap = typename Digraph::template ArcMap<int>,
+             typename SupplyMap = typename Digraph::template NodeMap<int> >
+#endif
+  class NetworkSimplex
+  {
+    TEMPLATE_DIGRAPH_TYPEDEFS(Digraph);
+
+    typedef typename CapacityMap::Value Capacity;
+    typedef typename CostMap::Value Cost;
+    typedef typename SupplyMap::Value Supply;
+
+    typedef std::vector<Arc> ArcVector;
+    typedef std::vector<Node> NodeVector;
+    typedef std::vector<int> IntVector;
+    typedef std::vector<bool> BoolVector;
+    typedef std::vector<Capacity> CapacityVector;
+    typedef std::vector<Cost> CostVector;
+    typedef std::vector<Supply> SupplyVector;
+
+  public:
+
+    /// The type of the flow map
+    typedef typename Digraph::template ArcMap<Capacity> FlowMap;
+    /// The type of the potential map
+    typedef typename Digraph::template NodeMap<Cost> PotentialMap;
+
+  public:
+
+    /// Enum type for selecting the pivot rule used by \ref run()
+    enum PivotRuleEnum {
+      FIRST_ELIGIBLE_PIVOT,
+      BEST_ELIGIBLE_PIVOT,
+      BLOCK_SEARCH_PIVOT,
+      CANDIDATE_LIST_PIVOT,
+      ALTERING_LIST_PIVOT
+    };
+
+  private:
+
+    // State constants for arcs
+    enum ArcStateEnum {
+      STATE_UPPER = -1,
+      STATE_TREE  =  0,
+      STATE_LOWER =  1
+    };
+
+  private:
+
+    // References for the original data
+    const Digraph &_orig_graph;
+    const LowerMap *_orig_lower;
+    const CapacityMap &_orig_cap;
+    const CostMap &_orig_cost;
+    const SupplyMap *_orig_supply;
+    Node _orig_source;
+    Node _orig_target;
+    Capacity _orig_flow_value;
+
+    // Result maps
+    FlowMap *_flow_result;
+    PotentialMap *_potential_result;
+    bool _local_flow;
+    bool _local_potential;
+
+    // Data structures for storing the graph
+    ArcVector _arc;
+    NodeVector _node;
+    IntNodeMap _node_id;
+    IntVector _source;
+    IntVector _target;
+
+    // The number of nodes and arcs in the original graph
+    int _node_num;
+    int _arc_num;
+
+    // Node and arc maps
+    CapacityVector _cap;
+    CostVector _cost;
+    CostVector _supply;
+    CapacityVector _flow;
+    CostVector _pi;
+
+    // Node and arc maps for the spanning tree structure
+    IntVector _depth;
+    IntVector _parent;
+    IntVector _pred;
+    IntVector _thread;
+    BoolVector _forward;
+    IntVector _state;
+
+    // The root node
+    int _root;
+
+    // The entering arc in the current pivot iteration
+    int _in_arc;
+
+    // Temporary data used in the current pivot iteration
+    int join, u_in, v_in, u_out, v_out;
+    int right, first, second, last;
+    int stem, par_stem, new_stem;
+    Capacity delta;
+
+  private:
+
+    /// \brief Implementation of the "First Eligible" pivot rule for the
+    /// \ref NetworkSimplex "network simplex" algorithm.
+    ///
+    /// This class implements the "First Eligible" pivot rule
+    /// for the \ref NetworkSimplex "network simplex" algorithm.
+    ///
+    /// For more information see \ref NetworkSimplex::run().
+ class FirstEligiblePivotRule + { + private: + + // References to the NetworkSimplex class + const ArcVector &_arc; + const IntVector &_source; + const IntVector &_target; + const CostVector &_cost; + const IntVector &_state; + const CostVector &_pi; + int &_in_arc; + int _arc_num; + + // Pivot rule data + int _next_arc; + + public: + + /// Constructor + FirstEligiblePivotRule(NetworkSimplex &ns) : + _arc(ns._arc), _source(ns._source), _target(ns._target), + _cost(ns._cost), _state(ns._state), _pi(ns._pi), + _in_arc(ns._in_arc), _arc_num(ns._arc_num), _next_arc(0) + {} + + /// Find next entering arc + bool findEnteringArc() { + Cost c; + for (int e = _next_arc; e < _arc_num; ++e) { + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < 0) { + _in_arc = e; + _next_arc = e + 1; + return true; + } + } + for (int e = 0; e < _next_arc; ++e) { + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < 0) { + _in_arc = e; + _next_arc = e + 1; + return true; + } + } + return false; + } + + }; //class FirstEligiblePivotRule + + + /// \brief Implementation of the "Best Eligible" pivot rule for the + /// \ref NetworkSimplex "network simplex" algorithm. + /// + /// This class implements the "Best Eligible" pivot rule + /// for the \ref NetworkSimplex "network simplex" algorithm. + /// + /// For more information see \ref NetworkSimplex::run(). + class BestEligiblePivotRule + { + private: + + // References to the NetworkSimplex class + const ArcVector &_arc; + const IntVector &_source; + const IntVector &_target; + const CostVector &_cost; + const IntVector &_state; + const CostVector &_pi; + int &_in_arc; + int _arc_num; + + public: + + /// Constructor + BestEligiblePivotRule(NetworkSimplex &ns) : + _arc(ns._arc), _source(ns._source), _target(ns._target), + _cost(ns._cost), _state(ns._state), _pi(ns._pi), + _in_arc(ns._in_arc), _arc_num(ns._arc_num) + {} + + /// Find next entering arc + bool findEnteringArc() { + Cost c, min = 0; + for (int e = 0; e < _arc_num; ++e) { + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < min) { + min = c; + _in_arc = e; + } + } + return min < 0; + } + + }; //class BestEligiblePivotRule + + + /// \brief Implementation of the "Block Search" pivot rule for the + /// \ref NetworkSimplex "network simplex" algorithm. + /// + /// This class implements the "Block Search" pivot rule + /// for the \ref NetworkSimplex "network simplex" algorithm. + /// + /// For more information see \ref NetworkSimplex::run(). 
+ class BlockSearchPivotRule + { + private: + + // References to the NetworkSimplex class + const ArcVector &_arc; + const IntVector &_source; + const IntVector &_target; + const CostVector &_cost; + const IntVector &_state; + const CostVector &_pi; + int &_in_arc; + int _arc_num; + + // Pivot rule data + int _block_size; + int _next_arc; + + public: + + /// Constructor + BlockSearchPivotRule(NetworkSimplex &ns) : + _arc(ns._arc), _source(ns._source), _target(ns._target), + _cost(ns._cost), _state(ns._state), _pi(ns._pi), + _in_arc(ns._in_arc), _arc_num(ns._arc_num), _next_arc(0) + { + // The main parameters of the pivot rule + const double BLOCK_SIZE_FACTOR = 2.0; + const int MIN_BLOCK_SIZE = 10; + + _block_size = std::max( int(BLOCK_SIZE_FACTOR * sqrt(_arc_num)), + MIN_BLOCK_SIZE ); + } + + /// Find next entering arc + bool findEnteringArc() { + Cost c, min = 0; + int cnt = _block_size; + int e, min_arc = _next_arc; + for (e = _next_arc; e < _arc_num; ++e) { + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < min) { + min = c; + min_arc = e; + } + if (--cnt == 0) { + if (min < 0) break; + cnt = _block_size; + } + } + if (min == 0 || cnt > 0) { + for (e = 0; e < _next_arc; ++e) { + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < min) { + min = c; + min_arc = e; + } + if (--cnt == 0) { + if (min < 0) break; + cnt = _block_size; + } + } + } + if (min >= 0) return false; + _in_arc = min_arc; + _next_arc = e; + return true; + } + + }; //class BlockSearchPivotRule + + + /// \brief Implementation of the "Candidate List" pivot rule for the + /// \ref NetworkSimplex "network simplex" algorithm. + /// + /// This class implements the "Candidate List" pivot rule + /// for the \ref NetworkSimplex "network simplex" algorithm. + /// + /// For more information see \ref NetworkSimplex::run(). 
+ class CandidateListPivotRule + { + private: + + // References to the NetworkSimplex class + const ArcVector &_arc; + const IntVector &_source; + const IntVector &_target; + const CostVector &_cost; + const IntVector &_state; + const CostVector &_pi; + int &_in_arc; + int _arc_num; + + // Pivot rule data + IntVector _candidates; + int _list_length, _minor_limit; + int _curr_length, _minor_count; + int _next_arc; + + public: + + /// Constructor + CandidateListPivotRule(NetworkSimplex &ns) : + _arc(ns._arc), _source(ns._source), _target(ns._target), + _cost(ns._cost), _state(ns._state), _pi(ns._pi), + _in_arc(ns._in_arc), _arc_num(ns._arc_num), _next_arc(0) + { + // The main parameters of the pivot rule + const double LIST_LENGTH_FACTOR = 1.0; + const int MIN_LIST_LENGTH = 10; + const double MINOR_LIMIT_FACTOR = 0.1; + const int MIN_MINOR_LIMIT = 3; + + _list_length = std::max( int(LIST_LENGTH_FACTOR * sqrt(_arc_num)), + MIN_LIST_LENGTH ); + _minor_limit = std::max( int(MINOR_LIMIT_FACTOR * _list_length), + MIN_MINOR_LIMIT ); + _curr_length = _minor_count = 0; + _candidates.resize(_list_length); + } + + /// Find next entering arc + bool findEnteringArc() { + Cost min, c; + int e, min_arc = _next_arc; + if (_curr_length > 0 && _minor_count < _minor_limit) { + // Minor iteration: select the best eligible arc from the + // current candidate list + ++_minor_count; + min = 0; + for (int i = 0; i < _curr_length; ++i) { + e = _candidates[i]; + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < min) { + min = c; + min_arc = e; + } + if (c >= 0) { + _candidates[i--] = _candidates[--_curr_length]; + } + } + if (min < 0) { + _in_arc = min_arc; + return true; + } + } + + // Major iteration: build a new candidate list + min = 0; + _curr_length = 0; + for (e = _next_arc; e < _arc_num; ++e) { + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < 0) { + _candidates[_curr_length++] = e; + if (c < min) { + min = c; + min_arc = e; + } + if (_curr_length == _list_length) break; + } + } + if (_curr_length < _list_length) { + for (e = 0; e < _next_arc; ++e) { + c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (c < 0) { + _candidates[_curr_length++] = e; + if (c < min) { + min = c; + min_arc = e; + } + if (_curr_length == _list_length) break; + } + } + } + if (_curr_length == 0) return false; + _minor_count = 1; + _in_arc = min_arc; + _next_arc = e; + return true; + } + + }; //class CandidateListPivotRule + + + /// \brief Implementation of the "Altering Candidate List" pivot rule + /// for the \ref NetworkSimplex "network simplex" algorithm. + /// + /// This class implements the "Altering Candidate List" pivot rule + /// for the \ref NetworkSimplex "network simplex" algorithm. + /// + /// For more information see \ref NetworkSimplex::run(). 
+ class AlteringListPivotRule + { + private: + + // References to the NetworkSimplex class + const ArcVector &_arc; + const IntVector &_source; + const IntVector &_target; + const CostVector &_cost; + const IntVector &_state; + const CostVector &_pi; + int &_in_arc; + int _arc_num; + + // Pivot rule data + int _block_size, _head_length, _curr_length; + int _next_arc; + IntVector _candidates; + CostVector _cand_cost; + + // Functor class to compare arcs during sort of the candidate list + class SortFunc + { + private: + const CostVector &_map; + public: + SortFunc(const CostVector &map) : _map(map) {} + bool operator()(int left, int right) { + return _map[left] > _map[right]; + } + }; + + SortFunc _sort_func; + + public: + + /// Constructor + AlteringListPivotRule(NetworkSimplex &ns) : + _arc(ns._arc), _source(ns._source), _target(ns._target), + _cost(ns._cost), _state(ns._state), _pi(ns._pi), + _in_arc(ns._in_arc), _arc_num(ns._arc_num), + _next_arc(0), _cand_cost(ns._arc_num), _sort_func(_cand_cost) + { + // The main parameters of the pivot rule + const double BLOCK_SIZE_FACTOR = 1.5; + const int MIN_BLOCK_SIZE = 10; + const double HEAD_LENGTH_FACTOR = 0.1; + const int MIN_HEAD_LENGTH = 3; + + _block_size = std::max( int(BLOCK_SIZE_FACTOR * sqrt(_arc_num)), + MIN_BLOCK_SIZE ); + _head_length = std::max( int(HEAD_LENGTH_FACTOR * _block_size), + MIN_HEAD_LENGTH ); + _candidates.resize(_head_length + _block_size); + _curr_length = 0; + } + + /// Find next entering arc + bool findEnteringArc() { + // Check the current candidate list + int e; + for (int i = 0; i < _curr_length; ++i) { + e = _candidates[i]; + _cand_cost[e] = _state[e] * + (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (_cand_cost[e] >= 0) { + _candidates[i--] = _candidates[--_curr_length]; + } + } + + // Extend the list + int cnt = _block_size; + int last_edge = 0; + int limit = _head_length; + + for (int e = _next_arc; e < _arc_num; ++e) { + _cand_cost[e] = _state[e] * + (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (_cand_cost[e] < 0) { + _candidates[_curr_length++] = e; + last_edge = e; + } + if (--cnt == 0) { + if (_curr_length > limit) break; + limit = 0; + cnt = _block_size; + } + } + if (_curr_length <= limit) { + for (int e = 0; e < _next_arc; ++e) { + _cand_cost[e] = _state[e] * + (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); + if (_cand_cost[e] < 0) { + _candidates[_curr_length++] = e; + last_edge = e; + } + if (--cnt == 0) { + if (_curr_length > limit) break; + limit = 0; + cnt = _block_size; + } + } + } + if (_curr_length == 0) return false; + _next_arc = last_edge + 1; + + // Make heap of the candidate list (approximating a partial sort) + make_heap( _candidates.begin(), _candidates.begin() + _curr_length, + _sort_func ); + + // Pop the first element of the heap + _in_arc = _candidates[0]; + pop_heap( _candidates.begin(), _candidates.begin() + _curr_length, + _sort_func ); + _curr_length = std::min(_head_length, _curr_length - 1); + return true; + } + + }; //class AlteringListPivotRule + + public: + + /// \brief General constructor (with lower bounds). + /// + /// General constructor (with lower bounds). + /// + /// \param digraph The digraph the algorithm runs on. + /// \param lower The lower bounds of the arcs. + /// \param capacity The capacities (upper bounds) of the arcs. + /// \param cost The cost (length) values of the arcs. + /// \param supply The supply values of the nodes (signed). 
+    NetworkSimplex( const Digraph &digraph,
+                    const LowerMap &lower,
+                    const CapacityMap &capacity,
+                    const CostMap &cost,
+                    const SupplyMap &supply ) :
+      _orig_graph(digraph), _orig_lower(&lower), _orig_cap(capacity),
+      _orig_cost(cost), _orig_supply(&supply),
+      _flow_result(NULL), _potential_result(NULL),
+      _local_flow(false), _local_potential(false),
+      _node_id(digraph)
+    {}
+
+    /// \brief General constructor (without lower bounds).
+    ///
+    /// General constructor (without lower bounds).
+    ///
+    /// \param digraph The digraph the algorithm runs on.
+    /// \param capacity The capacities (upper bounds) of the arcs.
+    /// \param cost The cost (length) values of the arcs.
+    /// \param supply The supply values of the nodes (signed).
+    NetworkSimplex( const Digraph &digraph,
+                    const CapacityMap &capacity,
+                    const CostMap &cost,
+                    const SupplyMap &supply ) :
+      _orig_graph(digraph), _orig_lower(NULL), _orig_cap(capacity),
+      _orig_cost(cost), _orig_supply(&supply),
+      _flow_result(NULL), _potential_result(NULL),
+      _local_flow(false), _local_potential(false),
+      _node_id(digraph)
+    {}
+
+    /// \brief Simple constructor (with lower bounds).
+    ///
+    /// Simple constructor (with lower bounds).
+    ///
+    /// \param digraph The digraph the algorithm runs on.
+    /// \param lower The lower bounds of the arcs.
+    /// \param capacity The capacities (upper bounds) of the arcs.
+    /// \param cost The cost (length) values of the arcs.
+    /// \param s The source node.
+    /// \param t The target node.
+    /// \param flow_value The required amount of flow from node \c s
+    /// to node \c t (i.e. the supply of \c s and the demand of \c t).
+    NetworkSimplex( const Digraph &digraph,
+                    const LowerMap &lower,
+                    const CapacityMap &capacity,
+                    const CostMap &cost,
+                    Node s, Node t,
+                    Capacity flow_value ) :
+      _orig_graph(digraph), _orig_lower(&lower), _orig_cap(capacity),
+      _orig_cost(cost), _orig_supply(NULL),
+      _orig_source(s), _orig_target(t), _orig_flow_value(flow_value),
+      _flow_result(NULL), _potential_result(NULL),
+      _local_flow(false), _local_potential(false),
+      _node_id(digraph)
+    {}
+
+    /// \brief Simple constructor (without lower bounds).
+    ///
+    /// Simple constructor (without lower bounds).
+    ///
+    /// \param digraph The digraph the algorithm runs on.
+    /// \param capacity The capacities (upper bounds) of the arcs.
+    /// \param cost The cost (length) values of the arcs.
+    /// \param s The source node.
+    /// \param t The target node.
+    /// \param flow_value The required amount of flow from node \c s
+    /// to node \c t (i.e. the supply of \c s and the demand of \c t).
+    NetworkSimplex( const Digraph &digraph,
+                    const CapacityMap &capacity,
+                    const CostMap &cost,
+                    Node s, Node t,
+                    Capacity flow_value ) :
+      _orig_graph(digraph), _orig_lower(NULL), _orig_cap(capacity),
+      _orig_cost(cost), _orig_supply(NULL),
+      _orig_source(s), _orig_target(t), _orig_flow_value(flow_value),
+      _flow_result(NULL), _potential_result(NULL),
+      _local_flow(false), _local_potential(false),
+      _node_id(digraph)
+    {}
+
+    /// Destructor.
+    ~NetworkSimplex() {
+      if (_local_flow) delete _flow_result;
+      if (_local_potential) delete _potential_result;
+    }
+
+    /// \brief Set the flow map.
+    ///
+    /// This function sets the flow map.
+    ///
+    /// \return (*this)
+    NetworkSimplex& flowMap(FlowMap &map) {
+      if (_local_flow) {
+        delete _flow_result;
+        _local_flow = false;
+      }
+      _flow_result = &map;
+      return *this;
+    }
+
+    /// \brief Set the potential map.
+    ///
+    /// This function sets the potential map.
+    ///
+    /// \return (*this)
+    NetworkSimplex& potentialMap(PotentialMap &map) {
+      if (_local_potential) {
+        delete _potential_result;
+        _local_potential = false;
+      }
+      _potential_result = &map;
+      return *this;
+    }
+
+    /// \name Execution control
+    /// The algorithm can be executed using the
+    /// \ref lemon::NetworkSimplex::run() "run()" function.
+    /// @{
+
+    /// \brief Run the algorithm.
+    ///
+    /// This function runs the algorithm.
+    ///
+    /// \param pivot_rule The pivot rule that is used during the
+    /// algorithm.
+    ///
+    /// The available pivot rules:
+    ///
+    /// - FIRST_ELIGIBLE_PIVOT The next eligible arc is selected in
+    /// a wraparound fashion in every iteration
+    /// (\ref FirstEligiblePivotRule).
+    ///
+    /// - BEST_ELIGIBLE_PIVOT The best eligible arc is selected in
+    /// every iteration (\ref BestEligiblePivotRule).
+    ///
+    /// - BLOCK_SEARCH_PIVOT A specified number of arcs are examined in
+    /// every iteration in a wraparound fashion and the best eligible
+    /// arc is selected from this block (\ref BlockSearchPivotRule).
+    ///
+    /// - CANDIDATE_LIST_PIVOT In a major iteration a candidate list is
+    /// built from eligible arcs in a wraparound fashion, and in the
+    /// following minor iterations the best eligible arc is selected
+    /// from this list (\ref CandidateListPivotRule).
+    ///
+    /// - ALTERING_LIST_PIVOT A modified version of the
+    /// "Candidate List" pivot rule. It keeps only a few of the best
+    /// eligible arcs from the former candidate list and extends this
+    /// list in every iteration (\ref AlteringListPivotRule).
+    ///
+    /// According to our comprehensive benchmark tests the "Block Search"
+    /// pivot rule proved to be the fastest and the most robust on
+    /// various test inputs. Thus it is the default option.
+    ///
+    /// \return \c true if a feasible flow can be found.
+    bool run(PivotRuleEnum pivot_rule = BLOCK_SEARCH_PIVOT) {
+      return init() && start(pivot_rule);
+    }
+
+    /// @}
+
+    /// \name Query Functions
+    /// The results of the algorithm can be obtained using these
+    /// functions.\n
+    /// \ref lemon::NetworkSimplex::run() "run()" must be called before
+    /// using them.
+    /// @{
+
+    /// \brief Return a const reference to the flow map.
+    ///
+    /// This function returns a const reference to an arc map storing
+    /// the found flow.
+    ///
+    /// \pre \ref run() must be called before using this function.
+    const FlowMap& flowMap() const {
+      return *_flow_result;
+    }
+
+    /// \brief Return a const reference to the potential map
+    /// (the dual solution).
+    ///
+    /// This function returns a const reference to a node map storing
+    /// the found potentials (the dual solution).
+    ///
+    /// \pre \ref run() must be called before using this function.
+    const PotentialMap& potentialMap() const {
+      return *_potential_result;
+    }
+
+    /// \brief Return the flow on the given arc.
+    ///
+    /// This function returns the flow on the given arc.
+    ///
+    /// \pre \ref run() must be called before using this function.
+    Capacity flow(const Arc& arc) const {
+      return (*_flow_result)[arc];
+    }
+
+    /// \brief Return the potential of the given node.
+    ///
+    /// This function returns the potential of the given node.
+    ///
+    /// \pre \ref run() must be called before using this function.
+    Cost potential(const Node& node) const {
+      return (*_potential_result)[node];
+    }
+
+    /// \brief Return the total cost of the found flow.
+    ///
+    /// This function returns the total cost of the found flow.
+    /// The complexity of the function is \f$ O(e) \f$.
+    ///
+    /// \pre \ref run() must be called before using this function.
+    Cost totalCost() const {
+      Cost c = 0;
+      for (ArcIt e(_orig_graph); e != INVALID; ++e)
+        c += (*_flow_result)[e] * _orig_cost[e];
+      return c;
+    }
+
+    /// @}
+
+  private:
+
+    // Initialize internal data structures
+    bool init() {
+      // Initialize result maps
+      if (!_flow_result) {
+        _flow_result = new FlowMap(_orig_graph);
+        _local_flow = true;
+      }
+      if (!_potential_result) {
+        _potential_result = new PotentialMap(_orig_graph);
+        _local_potential = true;
+      }
+
+      // Initialize vectors
+      _node_num = countNodes(_orig_graph);
+      _arc_num = countArcs(_orig_graph);
+      int all_node_num = _node_num + 1;
+      int all_edge_num = _arc_num + _node_num;
+
+      _arc.resize(_arc_num);
+      _node.reserve(_node_num);
+      _source.resize(all_edge_num);
+      _target.resize(all_edge_num);
+
+      _cap.resize(all_edge_num);
+      _cost.resize(all_edge_num);
+      _supply.resize(all_node_num);
+      _flow.resize(all_edge_num, 0);
+      _pi.resize(all_node_num, 0);
+
+      _depth.resize(all_node_num);
+      _parent.resize(all_node_num);
+      _pred.resize(all_node_num);
+      _thread.resize(all_node_num);
+      _forward.resize(all_node_num);
+      _state.resize(all_edge_num, STATE_LOWER);
+
+      // Initialize node related data
+      bool valid_supply = true;
+      if (_orig_supply) {
+        Supply sum = 0;
+        int i = 0;
+        for (NodeIt n(_orig_graph); n != INVALID; ++n, ++i) {
+          _node.push_back(n);
+          _node_id[n] = i;
+          _supply[i] = (*_orig_supply)[n];
+          sum += _supply[i];
+        }
+        valid_supply = (sum == 0);
+      } else {
+        int i = 0;
+        for (NodeIt n(_orig_graph); n != INVALID; ++n, ++i) {
+          _node.push_back(n);
+          _node_id[n] = i;
+          _supply[i] = 0;
+        }
+        _supply[_node_id[_orig_source]] =  _orig_flow_value;
+        _supply[_node_id[_orig_target]] = -_orig_flow_value;
+      }
+      if (!valid_supply) return false;
+
+      // Set data for the artificial root node
+      _root = _node_num;
+      _depth[_root] = 0;
+      _parent[_root] = -1;
+      _pred[_root] = -1;
+      _thread[_root] = 0;
+      _supply[_root] = 0;
+      _pi[_root] = 0;
+
+      // Store the arcs in a mixed order
+      int k = std::max(int(sqrt(_arc_num)), 10);
+      int i = 0;
+      for (ArcIt e(_orig_graph); e != INVALID; ++e) {
+        _arc[i] = e;
+        if ((i += k) >= _arc_num) i = (i % k) + 1;
+      }
+
+      // Initialize arc maps
+      for (int i = 0; i != _arc_num; ++i) {
+        Arc e = _arc[i];
+        _source[i] = _node_id[_orig_graph.source(e)];
+        _target[i] = _node_id[_orig_graph.target(e)];
+        _cost[i] = _orig_cost[e];
+        _cap[i] = _orig_cap[e];
+      }
+
+      // Remove non-zero lower bounds
+      if (_orig_lower) {
+        for (int i = 0; i != _arc_num; ++i) {
+          Capacity c = (*_orig_lower)[_arc[i]];
+          if (c != 0) {
+            _cap[i] -= c;
+            _supply[_source[i]] -= c;
+            _supply[_target[i]] += c;
+          }
+        }
+      }
+
+      // Add artificial arcs and initialize the spanning tree data structure
+      Cost max_cost = std::numeric_limits<Cost>::max() / 4;
+      Capacity max_cap = std::numeric_limits<Capacity>::max();
+      for (int u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
+        _thread[u] = u + 1;
+        _depth[u] = 1;
+        _parent[u] = _root;
+        _pred[u] = e;
+        if (_supply[u] >= 0) {
+          _flow[e] = _supply[u];
+          _forward[u] = true;
+          _pi[u] = -max_cost;
+        } else {
+          _flow[e] = -_supply[u];
+          _forward[u] = false;
+          _pi[u] = max_cost;
+        }
+        _cost[e] = max_cost;
+        _cap[e] = max_cap;
+        _state[e] = STATE_TREE;
+      }
+
+      return true;
+    }
+
+    // Find the join node
+    void findJoinNode() {
+      int u = _source[_in_arc];
+      int v = _target[_in_arc];
+      while (_depth[u] > _depth[v]) u = _parent[u];
+      while (_depth[v] > _depth[u]) v = _parent[v];
+      while (u != v) {
+        u = _parent[u];
+        v = _parent[v];
+      }
+      join = u;
+    }
+
+    // Find the
leaving arc of the cycle and returns true if the + // leaving arc is not the same as the entering arc + bool findLeavingArc() { + // Initialize first and second nodes according to the direction + // of the cycle + if (_state[_in_arc] == STATE_LOWER) { + first = _source[_in_arc]; + second = _target[_in_arc]; + } else { + first = _target[_in_arc]; + second = _source[_in_arc]; + } + delta = _cap[_in_arc]; + int result = 0; + Capacity d; + int e; + + // Search the cycle along the path form the first node to the root + for (int u = first; u != join; u = _parent[u]) { + e = _pred[u]; + d = _forward[u] ? _flow[e] : _cap[e] - _flow[e]; + if (d < delta) { + delta = d; + u_out = u; + result = 1; + } + } + // Search the cycle along the path form the second node to the root + for (int u = second; u != join; u = _parent[u]) { + e = _pred[u]; + d = _forward[u] ? _cap[e] - _flow[e] : _flow[e]; + if (d <= delta) { + delta = d; + u_out = u; + result = 2; + } + } + + if (result == 1) { + u_in = first; + v_in = second; + } else { + u_in = second; + v_in = first; + } + return result != 0; + } + + // Change _flow and _state vectors + void changeFlow(bool change) { + // Augment along the cycle + if (delta > 0) { + Capacity val = _state[_in_arc] * delta; + _flow[_in_arc] += val; + for (int u = _source[_in_arc]; u != join; u = _parent[u]) { + _flow[_pred[u]] += _forward[u] ? -val : val; + } + for (int u = _target[_in_arc]; u != join; u = _parent[u]) { + _flow[_pred[u]] += _forward[u] ? val : -val; + } + } + // Update the state of the entering and leaving arcs + if (change) { + _state[_in_arc] = STATE_TREE; + _state[_pred[u_out]] = + (_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER; + } else { + _state[_in_arc] = -_state[_in_arc]; + } + } + + // Update _thread and _parent vectors + void updateThreadParent() { + int u; + v_out = _parent[u_out]; + + // Handle the case when join and v_out coincide + bool par_first = false; + if (join == v_out) { + for (u = join; u != u_in && u != v_in; u = _thread[u]) ; + if (u == v_in) { + par_first = true; + while (_thread[u] != u_out) u = _thread[u]; + first = u; + } + } + + // Find the last successor of u_in (u) and the node after it (right) + // according to the thread index + for (u = u_in; _depth[_thread[u]] > _depth[u_in]; u = _thread[u]) ; + right = _thread[u]; + if (_thread[v_in] == u_out) { + for (last = u; _depth[last] > _depth[u_out]; last = _thread[last]) ; + if (last == u_out) last = _thread[last]; + } + else last = _thread[v_in]; + + // Update stem nodes + _thread[v_in] = stem = u_in; + par_stem = v_in; + while (stem != u_out) { + _thread[u] = new_stem = _parent[stem]; + + // Find the node just before the stem node (u) according to + // the original thread index + for (u = new_stem; _thread[u] != stem; u = _thread[u]) ; + _thread[u] = right; + + // Change the parent node of stem and shift stem and par_stem nodes + _parent[stem] = par_stem; + par_stem = stem; + stem = new_stem; + + // Find the last successor of stem (u) and the node after it (right) + // according to the thread index + for (u = stem; _depth[_thread[u]] > _depth[stem]; u = _thread[u]) ; + right = _thread[u]; + } + _parent[u_out] = par_stem; + _thread[u] = last; + + if (join == v_out && par_first) { + if (first != v_in) _thread[first] = right; + } else { + for (u = v_out; _thread[u] != u_out; u = _thread[u]) ; + _thread[u] = right; + } + } + + // Update _pred and _forward vectors + void updatePredArc() { + int u = u_out, v; + while (u != u_in) { + v = _parent[u]; + _pred[u] = _pred[v]; + 
_forward[u] = !_forward[v];
+        u = v;
+      }
+      _pred[u_in] = _in_arc;
+      _forward[u_in] = (u_in == _source[_in_arc]);
+    }
+
+    // Update _depth and _potential vectors
+    void updateDepthPotential() {
+      _depth[u_in] = _depth[v_in] + 1;
+      Cost sigma = _forward[u_in] ?
+        _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] :
+        _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]];
+      _pi[u_in] += sigma;
+      for (int u = _thread[u_in]; _parent[u] != -1; u = _thread[u]) {
+        _depth[u] = _depth[_parent[u]] + 1;
+        if (_depth[u] <= _depth[u_in]) break;
+        _pi[u] += sigma;
+      }
+    }
+
+    // Execute the algorithm
+    bool start(PivotRuleEnum pivot_rule) {
+      // Select the pivot rule implementation
+      switch (pivot_rule) {
+        case FIRST_ELIGIBLE_PIVOT:
+          return start<FirstEligiblePivotRule>();
+        case BEST_ELIGIBLE_PIVOT:
+          return start<BestEligiblePivotRule>();
+        case BLOCK_SEARCH_PIVOT:
+          return start<BlockSearchPivotRule>();
+        case CANDIDATE_LIST_PIVOT:
+          return start<CandidateListPivotRule>();
+        case ALTERING_LIST_PIVOT:
+          return start<AlteringListPivotRule>();
+      }
+      return false;
+    }
+
+    template <typename PivotRuleImplementation>
+    bool start() {
+      PivotRuleImplementation pivot(*this);
+
+      // Execute the network simplex algorithm
+      while (pivot.findEnteringArc()) {
+        findJoinNode();
+        bool change = findLeavingArc();
+        changeFlow(change);
+        if (change) {
+          updateThreadParent();
+          updatePredArc();
+          updateDepthPotential();
+        }
+      }
+
+      // Check if the flow amount equals zero on all the artificial arcs
+      for (int e = _arc_num; e != _arc_num + _node_num; ++e) {
+        if (_flow[e] > 0) return false;
+      }
+
+      // Copy flow values to _flow_result
+      if (_orig_lower) {
+        for (int i = 0; i != _arc_num; ++i) {
+          Arc e = _arc[i];
+          (*_flow_result)[e] = (*_orig_lower)[e] + _flow[i];
+        }
+      } else {
+        for (int i = 0; i != _arc_num; ++i) {
+          (*_flow_result)[_arc[i]] = _flow[i];
+        }
+      }
+      // Copy potential values to _potential_result
+      for (int i = 0; i != _node_num; ++i) {
+        (*_potential_result)[_node[i]] = _pi[i];
+      }
+
+      return true;
+    }
+
+  }; //class NetworkSimplex
+
+  ///@}
+
+} //namespace lemon
+
+#endif //LEMON_NETWORK_SIMPLEX_H
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -30,6 +30,7 @@
   maps_test
   max_matching_test
   min_cost_arborescence_test
+  min_cost_flow_test
   path_test
   preflow_test
   radix_sort_test
diff --git a/test/Makefile.am b/test/Makefile.am
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -26,6 +26,7 @@
 	test/maps_test \
 	test/max_matching_test \
 	test/min_cost_arborescence_test \
+	test/min_cost_flow_test \
 	test/path_test \
 	test/preflow_test \
 	test/radix_sort_test \
@@ -68,6 +69,7 @@
 test_mip_test_SOURCES = test/mip_test.cc
 test_max_matching_test_SOURCES = test/max_matching_test.cc
 test_min_cost_arborescence_test_SOURCES = test/min_cost_arborescence_test.cc
+test_min_cost_flow_test_SOURCES = test/min_cost_flow_test.cc
 test_path_test_SOURCES = test/path_test.cc
 test_preflow_test_SOURCES = test/preflow_test.cc
 test_radix_sort_test_SOURCES = test/radix_sort_test.cc
diff --git a/test/min_cost_flow_test.cc b/test/min_cost_flow_test.cc
new file mode 100644
--- /dev/null
+++ b/test/min_cost_flow_test.cc
@@ -0,0 +1,455 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2009
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ * + * This software is provided "AS IS" with no warranty of any kind, + * express or implied, and with no claim as to its suitability for any + * purpose. + * + */ + +#include +#include + +#include +#include +#include + +//#include +//#include +//#include +#include +//#include +//#include + +#include +#include + +#include "test_tools.h" + +using namespace lemon; + +char test_lgf[] = + "@nodes\n" + "label sup1 sup2 sup3\n" + " 1 20 27 0\n" + " 2 -4 0 0\n" + " 3 0 0 0\n" + " 4 0 0 0\n" + " 5 9 0 0\n" + " 6 -6 0 0\n" + " 7 0 0 0\n" + " 8 0 0 0\n" + " 9 3 0 0\n" + " 10 -2 0 0\n" + " 11 0 0 0\n" + " 12 -20 -27 0\n" + "\n" + "@arcs\n" + " cost cap low1 low2\n" + " 1 2 70 11 0 8\n" + " 1 3 150 3 0 1\n" + " 1 4 80 15 0 2\n" + " 2 8 80 12 0 0\n" + " 3 5 140 5 0 3\n" + " 4 6 60 10 0 1\n" + " 4 7 80 2 0 0\n" + " 4 8 110 3 0 0\n" + " 5 7 60 14 0 0\n" + " 5 11 120 12 0 0\n" + " 6 3 0 3 0 0\n" + " 6 9 140 4 0 0\n" + " 6 10 90 8 0 0\n" + " 7 1 30 5 0 0\n" + " 8 12 60 16 0 4\n" + " 9 12 50 6 0 0\n" + "10 12 70 13 0 5\n" + "10 2 100 7 0 0\n" + "10 7 60 10 0 0\n" + "11 10 20 14 0 6\n" + "12 11 30 10 0 0\n" + "\n" + "@attributes\n" + "source 1\n" + "target 12\n"; + + +// Check the interface of an MCF algorithm +template +class McfClassConcept +{ +public: + + template + struct Constraints { + void constraints() { + checkConcept(); + + MCF mcf_test1(g, lower, upper, cost, sup); + MCF mcf_test2(g, upper, cost, sup); + MCF mcf_test3(g, lower, upper, cost, n, n, k); + MCF mcf_test4(g, upper, cost, n, n, k); + + // TODO: This part should be enabled and the next part + // should be removed if map copying is supported +/* + flow = mcf_test1.flowMap(); + mcf_test1.flowMap(flow); + + pot = mcf_test1.potentialMap(); + mcf_test1.potentialMap(pot); +*/ +/**/ + const typename MCF::FlowMap &fm = + mcf_test1.flowMap(); + mcf_test1.flowMap(flow); + const typename MCF::PotentialMap &pm = + mcf_test1.potentialMap(); + mcf_test1.potentialMap(pot); + ignore_unused_variable_warning(fm); + ignore_unused_variable_warning(pm); +/**/ + + mcf_test1.run(); + + v = mcf_test1.totalCost(); + v = mcf_test1.flow(a); + v = mcf_test1.potential(n); + } + + typedef typename GR::Node Node; + typedef typename GR::Arc Arc; + typedef concepts::ReadMap NM; + typedef concepts::ReadMap AM; + + const GR &g; + const AM &lower; + const AM &upper; + const AM &cost; + const NM ⊃ + const Node &n; + const Arc &a; + const Value &k; + Value v; + + typename MCF::FlowMap &flow; + typename MCF::PotentialMap &pot; + }; + +}; + + +// Check the feasibility of the given flow (primal soluiton) +template < typename GR, typename LM, typename UM, + typename SM, typename FM > +bool checkFlow( const GR& gr, const LM& lower, const UM& upper, + const SM& supply, const FM& flow ) +{ + TEMPLATE_DIGRAPH_TYPEDEFS(GR); + + for (ArcIt e(gr); e != INVALID; ++e) { + if (flow[e] < lower[e] || flow[e] > upper[e]) return false; + } + + for (NodeIt n(gr); n != INVALID; ++n) { + typename SM::Value sum = 0; + for (OutArcIt e(gr, n); e != INVALID; ++e) + sum += flow[e]; + for (InArcIt e(gr, n); e != INVALID; ++e) + sum -= flow[e]; + if (sum != supply[n]) return false; + } + + return true; +} + +// Check the feasibility of the given potentials (dual soluiton) +// using the Complementary Slackness optimality condition +template < typename GR, typename LM, typename UM, + typename CM, typename FM, typename PM > +bool checkPotential( const GR& gr, const LM& lower, const UM& upper, + const CM& cost, const FM& flow, const PM& pi ) +{ + TEMPLATE_DIGRAPH_TYPEDEFS(GR); + + bool opt = true; + for 
(ArcIt e(gr); opt && e != INVALID; ++e) { + typename CM::Value red_cost = + cost[e] + pi[gr.source(e)] - pi[gr.target(e)]; + opt = red_cost == 0 || + (red_cost > 0 && flow[e] == lower[e]) || + (red_cost < 0 && flow[e] == upper[e]); + } + return opt; +} + +// Run a minimum cost flow algorithm and check the results +template < typename MCF, typename GR, + typename LM, typename UM, + typename CM, typename SM > +void checkMcf( const MCF& mcf, bool mcf_result, + const GR& gr, const LM& lower, const UM& upper, + const CM& cost, const SM& supply, + bool result, typename CM::Value total, + const std::string &test_id = "" ) +{ + check(mcf_result == result, "Wrong result " + test_id); + if (result) { + check(checkFlow(gr, lower, upper, supply, mcf.flowMap()), + "The flow is not feasible " + test_id); + check(mcf.totalCost() == total, "The flow is not optimal " + test_id); + check(checkPotential(gr, lower, upper, cost, mcf.flowMap(), + mcf.potentialMap()), + "Wrong potentials " + test_id); + } +} + +int main() +{ + // Check the interfaces + { + typedef int Value; + // This typedef should be enabled if the standard maps are + // reference maps in the graph concepts + //typedef concepts::Digraph GR; + typedef ListDigraph GR; + typedef concepts::ReadMap NM; + typedef concepts::ReadMap AM; + + //checkConcept< McfClassConcept, + // CycleCanceling >(); + //checkConcept< McfClassConcept, + // CapacityScaling >(); + //checkConcept< McfClassConcept, + // CostScaling >(); + checkConcept< McfClassConcept, + NetworkSimplex >(); + //checkConcept< MinCostFlow, + // NetworkSimplex >(); + } + + // Run various MCF tests + typedef ListDigraph Digraph; + DIGRAPH_TYPEDEFS(ListDigraph); + + // Read the test digraph + Digraph gr; + Digraph::ArcMap c(gr), l1(gr), l2(gr), u(gr); + Digraph::NodeMap s1(gr), s2(gr), s3(gr); + Node v, w; + + std::istringstream input(test_lgf); + DigraphReader(gr, input) + .arcMap("cost", c) + .arcMap("cap", u) + .arcMap("low1", l1) + .arcMap("low2", l2) + .nodeMap("sup1", s1) + .nodeMap("sup2", s2) + .nodeMap("sup3", s3) + .node("source", v) + .node("target", w) + .run(); + +/* + // A. Test CapacityScaling with scaling + { + CapacityScaling mcf1(gr, u, c, s1); + CapacityScaling mcf2(gr, u, c, v, w, 27); + CapacityScaling mcf3(gr, u, c, s3); + CapacityScaling mcf4(gr, l2, u, c, s1); + CapacityScaling mcf5(gr, l2, u, c, v, w, 27); + CapacityScaling mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(), gr, l1, u, c, s1, true, 5240, "#A1"); + checkMcf(mcf2, mcf2.run(), gr, l1, u, c, s2, true, 7620, "#A2"); + checkMcf(mcf3, mcf3.run(), gr, l1, u, c, s3, true, 0, "#A3"); + checkMcf(mcf4, mcf4.run(), gr, l2, u, c, s1, true, 5970, "#A4"); + checkMcf(mcf5, mcf5.run(), gr, l2, u, c, s2, true, 8010, "#A5"); + checkMcf(mcf6, mcf6.run(), gr, l2, u, c, s3, false, 0, "#A6"); + } + + // B. 
Test CapacityScaling without scaling + { + CapacityScaling mcf1(gr, u, c, s1); + CapacityScaling mcf2(gr, u, c, v, w, 27); + CapacityScaling mcf3(gr, u, c, s3); + CapacityScaling mcf4(gr, l2, u, c, s1); + CapacityScaling mcf5(gr, l2, u, c, v, w, 27); + CapacityScaling mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(false), gr, l1, u, c, s1, true, 5240, "#B1"); + checkMcf(mcf2, mcf2.run(false), gr, l1, u, c, s2, true, 7620, "#B2"); + checkMcf(mcf3, mcf3.run(false), gr, l1, u, c, s3, true, 0, "#B3"); + checkMcf(mcf4, mcf4.run(false), gr, l2, u, c, s1, true, 5970, "#B4"); + checkMcf(mcf5, mcf5.run(false), gr, l2, u, c, s2, true, 8010, "#B5"); + checkMcf(mcf6, mcf6.run(false), gr, l2, u, c, s3, false, 0, "#B6"); + } + + // C. Test CostScaling using partial augment-relabel method + { + CostScaling mcf1(gr, u, c, s1); + CostScaling mcf2(gr, u, c, v, w, 27); + CostScaling mcf3(gr, u, c, s3); + CostScaling mcf4(gr, l2, u, c, s1); + CostScaling mcf5(gr, l2, u, c, v, w, 27); + CostScaling mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(), gr, l1, u, c, s1, true, 5240, "#C1"); + checkMcf(mcf2, mcf2.run(), gr, l1, u, c, s2, true, 7620, "#C2"); + checkMcf(mcf3, mcf3.run(), gr, l1, u, c, s3, true, 0, "#C3"); + checkMcf(mcf4, mcf4.run(), gr, l2, u, c, s1, true, 5970, "#C4"); + checkMcf(mcf5, mcf5.run(), gr, l2, u, c, s2, true, 8010, "#C5"); + checkMcf(mcf6, mcf6.run(), gr, l2, u, c, s3, false, 0, "#C6"); + } + + // D. Test CostScaling using push-relabel method + { + CostScaling mcf1(gr, u, c, s1); + CostScaling mcf2(gr, u, c, v, w, 27); + CostScaling mcf3(gr, u, c, s3); + CostScaling mcf4(gr, l2, u, c, s1); + CostScaling mcf5(gr, l2, u, c, v, w, 27); + CostScaling mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(false), gr, l1, u, c, s1, true, 5240, "#D1"); + checkMcf(mcf2, mcf2.run(false), gr, l1, u, c, s2, true, 7620, "#D2"); + checkMcf(mcf3, mcf3.run(false), gr, l1, u, c, s3, true, 0, "#D3"); + checkMcf(mcf4, mcf4.run(false), gr, l2, u, c, s1, true, 5970, "#D4"); + checkMcf(mcf5, mcf5.run(false), gr, l2, u, c, s2, true, 8010, "#D5"); + checkMcf(mcf6, mcf6.run(false), gr, l2, u, c, s3, false, 0, "#D6"); + } +*/ + + // E. Test NetworkSimplex with FIRST_ELIGIBLE_PIVOT + { + NetworkSimplex::PivotRuleEnum pr = + NetworkSimplex::FIRST_ELIGIBLE_PIVOT; + NetworkSimplex mcf1(gr, u, c, s1); + NetworkSimplex mcf2(gr, u, c, v, w, 27); + NetworkSimplex mcf3(gr, u, c, s3); + NetworkSimplex mcf4(gr, l2, u, c, s1); + NetworkSimplex mcf5(gr, l2, u, c, v, w, 27); + NetworkSimplex mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(pr), gr, l1, u, c, s1, true, 5240, "#E1"); + checkMcf(mcf2, mcf2.run(pr), gr, l1, u, c, s2, true, 7620, "#E2"); + checkMcf(mcf3, mcf3.run(pr), gr, l1, u, c, s3, true, 0, "#E3"); + checkMcf(mcf4, mcf4.run(pr), gr, l2, u, c, s1, true, 5970, "#E4"); + checkMcf(mcf5, mcf5.run(pr), gr, l2, u, c, s2, true, 8010, "#E5"); + checkMcf(mcf6, mcf6.run(pr), gr, l2, u, c, s3, false, 0, "#E6"); + } + + // F. 
Test NetworkSimplex with BEST_ELIGIBLE_PIVOT + { + NetworkSimplex::PivotRuleEnum pr = + NetworkSimplex::BEST_ELIGIBLE_PIVOT; + NetworkSimplex mcf1(gr, u, c, s1); + NetworkSimplex mcf2(gr, u, c, v, w, 27); + NetworkSimplex mcf3(gr, u, c, s3); + NetworkSimplex mcf4(gr, l2, u, c, s1); + NetworkSimplex mcf5(gr, l2, u, c, v, w, 27); + NetworkSimplex mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(pr), gr, l1, u, c, s1, true, 5240, "#F1"); + checkMcf(mcf2, mcf2.run(pr), gr, l1, u, c, s2, true, 7620, "#F2"); + checkMcf(mcf3, mcf3.run(pr), gr, l1, u, c, s3, true, 0, "#F3"); + checkMcf(mcf4, mcf4.run(pr), gr, l2, u, c, s1, true, 5970, "#F4"); + checkMcf(mcf5, mcf5.run(pr), gr, l2, u, c, s2, true, 8010, "#F5"); + checkMcf(mcf6, mcf6.run(pr), gr, l2, u, c, s3, false, 0, "#F6"); + } + + // G. Test NetworkSimplex with BLOCK_SEARCH_PIVOT + { + NetworkSimplex::PivotRuleEnum pr = + NetworkSimplex::BLOCK_SEARCH_PIVOT; + NetworkSimplex mcf1(gr, u, c, s1); + NetworkSimplex mcf2(gr, u, c, v, w, 27); + NetworkSimplex mcf3(gr, u, c, s3); + NetworkSimplex mcf4(gr, l2, u, c, s1); + NetworkSimplex mcf5(gr, l2, u, c, v, w, 27); + NetworkSimplex mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(pr), gr, l1, u, c, s1, true, 5240, "#G1"); + checkMcf(mcf2, mcf2.run(pr), gr, l1, u, c, s2, true, 7620, "#G2"); + checkMcf(mcf3, mcf3.run(pr), gr, l1, u, c, s3, true, 0, "#G3"); + checkMcf(mcf4, mcf4.run(pr), gr, l2, u, c, s1, true, 5970, "#G4"); + checkMcf(mcf5, mcf5.run(pr), gr, l2, u, c, s2, true, 8010, "#G5"); + checkMcf(mcf6, mcf6.run(pr), gr, l2, u, c, s3, false, 0, "#G6"); + } + + // H. Test NetworkSimplex with CANDIDATE_LIST_PIVOT + { + NetworkSimplex::PivotRuleEnum pr = + NetworkSimplex::CANDIDATE_LIST_PIVOT; + NetworkSimplex mcf1(gr, u, c, s1); + NetworkSimplex mcf2(gr, u, c, v, w, 27); + NetworkSimplex mcf3(gr, u, c, s3); + NetworkSimplex mcf4(gr, l2, u, c, s1); + NetworkSimplex mcf5(gr, l2, u, c, v, w, 27); + NetworkSimplex mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(pr), gr, l1, u, c, s1, true, 5240, "#H1"); + checkMcf(mcf2, mcf2.run(pr), gr, l1, u, c, s2, true, 7620, "#H2"); + checkMcf(mcf3, mcf3.run(pr), gr, l1, u, c, s3, true, 0, "#H3"); + checkMcf(mcf4, mcf4.run(pr), gr, l2, u, c, s1, true, 5970, "#H4"); + checkMcf(mcf5, mcf5.run(pr), gr, l2, u, c, s2, true, 8010, "#H5"); + checkMcf(mcf6, mcf6.run(pr), gr, l2, u, c, s3, false, 0, "#H6"); + } + + // I. Test NetworkSimplex with ALTERING_LIST_PIVOT + { + NetworkSimplex::PivotRuleEnum pr = + NetworkSimplex::ALTERING_LIST_PIVOT; + NetworkSimplex mcf1(gr, u, c, s1); + NetworkSimplex mcf2(gr, u, c, v, w, 27); + NetworkSimplex mcf3(gr, u, c, s3); + NetworkSimplex mcf4(gr, l2, u, c, s1); + NetworkSimplex mcf5(gr, l2, u, c, v, w, 27); + NetworkSimplex mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(pr), gr, l1, u, c, s1, true, 5240, "#I1"); + checkMcf(mcf2, mcf2.run(pr), gr, l1, u, c, s2, true, 7620, "#I2"); + checkMcf(mcf3, mcf3.run(pr), gr, l1, u, c, s3, true, 0, "#I3"); + checkMcf(mcf4, mcf4.run(pr), gr, l2, u, c, s1, true, 5970, "#I4"); + checkMcf(mcf5, mcf5.run(pr), gr, l2, u, c, s2, true, 8010, "#I5"); + checkMcf(mcf6, mcf6.run(pr), gr, l2, u, c, s3, false, 0, "#I6"); + } + +/* + // J. 
Test MinCostFlow + { + MinCostFlow mcf1(gr, u, c, s1); + MinCostFlow mcf2(gr, u, c, v, w, 27); + MinCostFlow mcf3(gr, u, c, s3); + MinCostFlow mcf4(gr, l2, u, c, s1); + MinCostFlow mcf5(gr, l2, u, c, v, w, 27); + MinCostFlow mcf6(gr, l2, u, c, s3); + + checkMcf(mcf1, mcf1.run(), gr, l1, u, c, s1, true, 5240, "#J1"); + checkMcf(mcf2, mcf2.run(), gr, l1, u, c, s2, true, 7620, "#J2"); + checkMcf(mcf3, mcf3.run(), gr, l1, u, c, s3, true, 0, "#J3"); + checkMcf(mcf4, mcf4.run(), gr, l2, u, c, s1, true, 5970, "#J4"); + checkMcf(mcf5, mcf5.run(), gr, l2, u, c, s2, true, 8010, "#J5"); + checkMcf(mcf6, mcf6.run(), gr, l2, u, c, s3, false, 0, "#J6"); + } +*/ +/* + // K. Test MinCostMaxFlow + { + MinCostMaxFlow mcmf(gr, u, c, v, w); + mcmf.run(); + checkMcf(mcmf, true, gr, l1, u, c, s3, true, 7620, "#K1"); + } +*/ + + return 0; +}
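
For readers who want to try the class added by this patch, here is a minimal usage sketch. It is not part of the patch itself: the three-node network and its capacity, cost and supply values are invented for illustration, and it assumes the header is installed as lemon/network_simplex.h with ListDigraph and int-valued maps, so the default template parameters apply.

// Minimal usage sketch: build a tiny digraph, attach capacity, cost and
// supply maps, and run NetworkSimplex with the default pivot rule.
#include <iostream>

#include <lemon/list_graph.h>
#include <lemon/network_simplex.h>

using namespace lemon;

int main() {
  ListDigraph g;
  ListDigraph::Node s = g.addNode();
  ListDigraph::Node m = g.addNode();
  ListDigraph::Node t = g.addNode();
  ListDigraph::Arc a1 = g.addArc(s, m);
  ListDigraph::Arc a2 = g.addArc(m, t);
  ListDigraph::Arc a3 = g.addArc(s, t);

  // Arc capacities and costs (non-negative integers, as the class docs require)
  ListDigraph::ArcMap<int> cap(g), cost(g);
  cap[a1] = 5;  cost[a1] = 1;
  cap[a2] = 5;  cost[a2] = 1;
  cap[a3] = 3;  cost[a3] = 4;

  // Signed supply values: +4 at the source, -4 at the target
  ListDigraph::NodeMap<int> sup(g);
  sup[s] = 4;  sup[m] = 0;  sup[t] = -4;

  // Default template parameters (ArcMap<int> / NodeMap<int>) match these maps,
  // and this is the "general constructor (without lower bounds)".
  NetworkSimplex<ListDigraph> ns(g, cap, cost, sup);

  // BLOCK_SEARCH_PIVOT is the default; it is passed explicitly for illustration.
  if (ns.run(NetworkSimplex<ListDigraph>::BLOCK_SEARCH_PIVOT)) {
    std::cout << "total cost: " << ns.totalCost() << std::endl;
    for (ListDigraph::ArcIt a(g); a != INVALID; ++a)
      std::cout << g.id(g.source(a)) << " -> " << g.id(g.target(a))
                << ": " << ns.flow(a) << std::endl;
  } else {
    std::cout << "infeasible supply/demand" << std::endl;
  }
  return 0;
}

The same object could be re-run with any of the other PivotRuleEnum values listed in the run() documentation to compare the pivot rules on a given instance.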