/* -*- mode: C++; indent-tabs-mode: nil; -*-
 *
 * This file is a part of LEMON, a generic C++ optimization library.
 *
 * Copyright (C) 2003-2009
 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
 * (Egervary Research Group on Combinatorial Optimization, EGRES).
 *
 * Permission to use, modify and distribute this software is granted
 * provided that this copyright notice appears in all copies. For
 * precise terms see the accompanying LICENSE file.
 *
 * This software is provided "AS IS" with no warranty of any kind,
 * express or implied, and with no claim as to its suitability for any
 * purpose.
 *
 */

#ifndef LEMON_NETWORK_SIMPLEX_H
#define LEMON_NETWORK_SIMPLEX_H

/// \ingroup min_cost_flow
///
/// \file
/// \brief Network Simplex algorithm for finding a minimum cost flow.

#include <vector>
#include <limits>
#include <algorithm>

#include <lemon/core.h>
#include <lemon/math.h>

namespace lemon {

  /// \addtogroup min_cost_flow
  /// @{

  /// \brief Implementation of the primal Network Simplex algorithm
  /// for finding a \ref min_cost_flow "minimum cost flow".
  ///
  /// \ref NetworkSimplex implements the primal Network Simplex algorithm
  /// for finding a \ref min_cost_flow "minimum cost flow".
  /// This algorithm is a specialized version of the linear programming
  /// simplex method directly for the minimum cost flow problem.
  /// It is one of the most efficient solution methods.
  ///
  /// In general this class is the fastest implementation available
  /// in LEMON for the minimum cost flow problem.
  ///
  /// \tparam GR The digraph type the algorithm runs on.
  /// \tparam F The value type used for flow amounts, capacity bounds
  /// and supply values in the algorithm. By default it is \c int.
  /// \tparam C The value type used for costs and potentials in the
  /// algorithm. By default it is the same as \c F.
  ///
  /// \warning Both value types must be signed and all input data must
  /// be integer.
  ///
  /// \note %NetworkSimplex provides five different pivot rule
  /// implementations. For more information see \ref PivotRule.
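  ///
  /// A minimal usage sketch is given below. Only \ref run(), the query
  /// functions and the types declared in this file are taken from this
  /// class; the parameter setting functions named in the sketch are
  /// assumptions about the usual LEMON interface and may differ.
  /// \code
  ///   ListDigraph g;
  ///   ListDigraph::ArcMap<int> capacity(g), cost(g);
  ///   ListDigraph::NodeMap<int> supply(g);
  ///   // ... build the digraph and fill the maps ...
  ///   NetworkSimplex<ListDigraph> ns(g);
  ///   ns.upperMap(capacity).costMap(cost).supplyMap(supply); // assumed setters
  ///   bool feasible = ns.run();   // an optional PivotRule can be passed
  /// \endcode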
  template <typename GR, typename F = int, typename C = F>
  class NetworkSimplex
  {
  public:

    /// The flow type of the algorithm
    typedef F Flow;
    /// The cost type of the algorithm
    typedef C Cost;
    /// The type of the flow map
    typedef typename GR::template ArcMap<Flow> FlowMap;
    /// The type of the potential map
    typedef typename GR::template NodeMap<Cost> PotentialMap;

  public:

    /// \brief Enum type for selecting the pivot rule.
    ///
    /// Enum type for selecting the pivot rule for the \ref run()
    /// function.
    ///
    /// \ref NetworkSimplex provides five different pivot rule
    /// implementations that significantly affect the running time
    /// of the algorithm.
    /// By default, \ref BLOCK_SEARCH "Block Search" is used, which
    /// proved to be the most efficient and the most robust on various
    /// test inputs according to our benchmark tests.
    /// However, another pivot rule can be selected using the \ref run()
    /// function with the proper parameter.
    enum PivotRule {

      /// The First Eligible pivot rule.
      /// The next eligible arc is selected in a wraparound fashion
      /// in every iteration.
      FIRST_ELIGIBLE,

      /// The Best Eligible pivot rule.
      /// The best eligible arc is selected in every iteration.
      BEST_ELIGIBLE,

      /// The Block Search pivot rule.
      /// A specified number of arcs are examined in every iteration
      /// in a wraparound fashion and the best eligible arc is selected
      /// from this block.
      BLOCK_SEARCH,

      /// The Candidate List pivot rule.
      /// In a major iteration a candidate list is built from eligible arcs
      /// in a wraparound fashion and in the following minor iterations
      /// the best eligible arc is selected from this list.
      CANDIDATE_LIST,

      /// The Altering Candidate List pivot rule.
      /// It is a modified version of the Candidate List method.
      /// It keeps only a few of the best eligible arcs from the former
      /// candidate list and extends this list in every iteration.
      ALTERING_LIST
    };
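
    // Example (illustrative only): a pivot rule other than the default
    // Block Search rule can be requested through run(), e.g.
    //   ns.run(NetworkSimplex<ListDigraph>::CANDIDATE_LIST);
    // where `ns` is a NetworkSimplex instance built on a ListDigraph.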

  private:

    TEMPLATE_DIGRAPH_TYPEDEFS(GR);

    typedef typename GR::template ArcMap<Flow> FlowArcMap;
    typedef typename GR::template ArcMap<Cost> CostArcMap;
    typedef typename GR::template NodeMap<Flow> FlowNodeMap;

    typedef std::vector<Arc> ArcVector;
    typedef std::vector<Node> NodeVector;
    typedef std::vector<int> IntVector;
    typedef std::vector<bool> BoolVector;
    typedef std::vector<Flow> FlowVector;
    typedef std::vector<Cost> CostVector;

    // State constants for arcs
    enum ArcStateEnum {
      STATE_UPPER = -1,
      STATE_TREE = 0,
      STATE_LOWER = 1
    };
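
    // An arc is "eligible" to enter the basis exactly when
    //   _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]) < 0,
    // i.e. a lower-bounded arc with negative reduced cost or an
    // upper-bounded arc with positive reduced cost. Tree arcs (state 0)
    // are never eligible. (The pivot rule classes, defined in the part
    // of the file not shown here, use this criterion.)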

  private:

    // Data related to the underlying digraph
    const GR &_graph;
    int _node_num;
    int _arc_num;

    // Parameters of the problem
    FlowArcMap *_plower;
    FlowArcMap *_pupper;
    CostArcMap *_pcost;
    FlowNodeMap *_psupply;
@@ -951,259 +952,273 @@
    /// \ref min_cost_flow "minimum cost flow" problem.
    ///
    /// \pre \ref run() must be called before using this function.
    const PotentialMap& potentialMap() const {
      return *_potential_map;
    }

    /// @}

  private:

    // Initialize internal data structures
    bool init() {
      // Initialize result maps
      if (!_flow_map) {
        _flow_map = new FlowMap(_graph);
        _local_flow = true;
      }
      if (!_potential_map) {
        _potential_map = new PotentialMap(_graph);
        _local_potential = true;
      }

      // Initialize vectors
      _node_num = countNodes(_graph);
      _arc_num = countArcs(_graph);
      int all_node_num = _node_num + 1;
      int all_arc_num = _arc_num + _node_num;
      if (_node_num == 0) return false;

      _arc_ref.resize(_arc_num);
      _source.resize(all_arc_num);
      _target.resize(all_arc_num);

      _cap.resize(all_arc_num);
      _cost.resize(all_arc_num);
      _supply.resize(all_node_num);
      _flow.resize(all_arc_num);
      _pi.resize(all_node_num);

      _parent.resize(all_node_num);
      _pred.resize(all_node_num);
      _forward.resize(all_node_num);
      _thread.resize(all_node_num);
      _rev_thread.resize(all_node_num);
      _succ_num.resize(all_node_num);
      _last_succ.resize(all_node_num);
      _state.resize(all_arc_num);

      // Initialize node related data
      bool valid_supply = true;
      if (!_pstsup && !_psupply) {
        _pstsup = true;
        _psource = _ptarget = NodeIt(_graph);
        _pstflow = 0;
      }
      if (_psupply) {
        Flow sum = 0;
        int i = 0;
        for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
          _node_id[n] = i;
          _supply[i] = (*_psupply)[n];
          sum += _supply[i];
        }
        valid_supply = (sum == 0);
      } else {
        int i = 0;
        for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
          _node_id[n] = i;
          _supply[i] = 0;
        }
        _supply[_node_id[_psource]] = _pstflow;
        _supply[_node_id[_ptarget]] = -_pstflow;
      }
      if (!valid_supply) return false;

      // Set data for the artificial root node
      _root = _node_num;
      _parent[_root] = -1;
      _pred[_root] = -1;
      _thread[_root] = 0;
      _rev_thread[0] = _root;
      _succ_num[_root] = all_node_num;
      _last_succ[_root] = _root - 1;
      _supply[_root] = 0;
      _pi[_root] = 0;

      // Store the arcs in a mixed order
      int k = std::max(int(sqrt(_arc_num)), 10);
      int i = 0;
      for (ArcIt e(_graph); e != INVALID; ++e) {
        _arc_ref[i] = e;
        if ((i += k) >= _arc_num) i = (i % k) + 1;
      }
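      // (The arcs are scattered with a stride of about sqrt(_arc_num) so
      // that consecutive entries of _arc_ref come from different parts of
      // the arc list, which tends to help the block search and candidate
      // list pivot rules find good entering arcs earlier.)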

      // Initialize arc maps
      Flow inf_cap =
        std::numeric_limits<Flow>::has_infinity ?
        std::numeric_limits<Flow>::infinity() :
        std::numeric_limits<Flow>::max();
      if (_pupper && _pcost) {
        for (int i = 0; i != _arc_num; ++i) {
          Arc e = _arc_ref[i];
          _source[i] = _node_id[_graph.source(e)];
          _target[i] = _node_id[_graph.target(e)];
          _cap[i] = (*_pupper)[e];
          _cost[i] = (*_pcost)[e];
          _flow[i] = 0;
          _state[i] = STATE_LOWER;
        }
      } else {
        for (int i = 0; i != _arc_num; ++i) {
          Arc e = _arc_ref[i];
          _source[i] = _node_id[_graph.source(e)];
          _target[i] = _node_id[_graph.target(e)];
          _flow[i] = 0;
          _state[i] = STATE_LOWER;
        }
        if (_pupper) {
          for (int i = 0; i != _arc_num; ++i)
            _cap[i] = (*_pupper)[_arc_ref[i]];
        } else {
          for (int i = 0; i != _arc_num; ++i)
            _cap[i] = inf_cap;
        }
        if (_pcost) {
          for (int i = 0; i != _arc_num; ++i)
            _cost[i] = (*_pcost)[_arc_ref[i]];
        } else {
          for (int i = 0; i != _arc_num; ++i)
            _cost[i] = 1;
        }
      }

      // Initialize artificial cost
      Cost art_cost;
      if (std::numeric_limits<Cost>::is_exact) {
        art_cost = std::numeric_limits<Cost>::max() / 4 + 1;
      } else {
        art_cost = std::numeric_limits<Cost>::min();
        for (int i = 0; i != _arc_num; ++i) {
          if (_cost[i] > art_cost) art_cost = _cost[i];
        }
        art_cost = (art_cost + 1) * _node_num;
      }
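      // (art_cost is intended to be larger than the total cost of any
      // cycle-free path in the original network, so the artificial arcs
      // added below are only used when the problem has no feasible
      // solution; for non-exact cost types it is derived from the largest
      // arc cost instead of numeric_limits to avoid precision problems.)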

      // Remove non-zero lower bounds
      if (_plower) {
        for (int i = 0; i != _arc_num; ++i) {
          Flow c = (*_plower)[_arc_ref[i]];
          if (c != 0) {
            _cap[i] -= c;
            _supply[_source[i]] -= c;
            _supply[_target[i]] += c;
          }
        }
      }
|
| 1095 | 1110 |
// Add artificial arcs and initialize the spanning tree data structure |
| 1096 | 1111 |
for (int u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
|
| 1097 | 1112 |
_thread[u] = u + 1; |
| 1098 | 1113 |
_rev_thread[u + 1] = u; |
| 1099 | 1114 |
_succ_num[u] = 1; |
| 1100 | 1115 |
_last_succ[u] = u; |
| 1101 | 1116 |
_parent[u] = _root; |
| 1102 | 1117 |
_pred[u] = e; |
| 1103 |
_cost[e] = max_cost; |
|
| 1104 |
_cap[e] = max_cap; |
|
| 1118 |
_cost[e] = art_cost; |
|
| 1119 |
_cap[e] = inf_cap; |
|
| 1105 | 1120 |
_state[e] = STATE_TREE; |
| 1106 | 1121 |
if (_supply[u] >= 0) {
|
| 1107 | 1122 |
_flow[e] = _supply[u]; |
| 1108 | 1123 |
_forward[u] = true; |
| 1109 |
_pi[u] = - |
|
| 1124 |
_pi[u] = -art_cost; |
|
| 1110 | 1125 |
} else {
|
| 1111 | 1126 |
_flow[e] = -_supply[u]; |
| 1112 | 1127 |
_forward[u] = false; |
| 1113 |
_pi[u] = |
|
| 1128 |
_pi[u] = art_cost; |
|
| 1114 | 1129 |
} |
| 1115 | 1130 |
} |
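      // (At this point the artificial arcs connecting every node to _root
      // form the initial basis: a feasible spanning tree solution in which
      // each node sends its excess to, or receives its deficit from, the
      // root along its own artificial arc.)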

      return true;
    }

    // Find the join node
    void findJoinNode() {
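      // The join node is the apex of the pivot cycle: walking up from the
      // endpoints of the entering arc, always stepping from the node with
      // the smaller subtree (_succ_num), ends at their lowest common
      // ancestor in the spanning tree.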
      int u = _source[in_arc];
      int v = _target[in_arc];
      while (u != v) {
        if (_succ_num[u] < _succ_num[v]) {
          u = _parent[u];
        } else {
          v = _parent[v];
        }
      }
      join = u;
    }

    // Find the leaving arc of the cycle and return true if the
    // leaving arc is not the same as the entering arc
    bool findLeavingArc() {
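      // delta will be the largest amount by which flow can be pushed
      // around the pivot cycle: it starts as the capacity of the entering
      // arc and is reduced to the smallest residual capacity found on the
      // two tree paths below; `result` records which path contains the
      // blocking (leaving) arc.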
      // Initialize first and second nodes according to the direction
      // of the cycle
      if (_state[in_arc] == STATE_LOWER) {
        first = _source[in_arc];
        second = _target[in_arc];
      } else {
        first = _target[in_arc];
        second = _source[in_arc];
      }
      delta = _cap[in_arc];
      int result = 0;
      Flow d;
      int e;

      // Search the cycle along the path from the first node to the root
      for (int u = first; u != join; u = _parent[u]) {
        e = _pred[u];
        d = _forward[u] ? _flow[e] : _cap[e] - _flow[e];
        if (d < delta) {
          delta = d;
          u_out = u;
          result = 1;
        }
      }
      // Search the cycle along the path from the second node to the root
      for (int u = second; u != join; u = _parent[u]) {
        e = _pred[u];
        d = _forward[u] ? _cap[e] - _flow[e] : _flow[e];
        if (d <= delta) {
          delta = d;
          u_out = u;
          result = 2;
        }
      }

      if (result == 1) {
        u_in = first;
        v_in = second;
      } else {
        u_in = second;
        v_in = first;
      }
      return result != 0;
    }

    // Change _flow and _state vectors
    void changeFlow(bool change) {
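      // val is +delta if the entering arc was at its lower bound and
      // -delta if it was at its upper bound (encoded by _state[in_arc]),
      // so the same update below works for both directions of the cycle.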
      // Augment along the cycle
      if (delta > 0) {
        Flow val = _state[in_arc] * delta;
        _flow[in_arc] += val;
        for (int u = _source[in_arc]; u != join; u = _parent[u]) {
          _flow[_pred[u]] += _forward[u] ? -val : val;
        }
        for (int u = _target[in_arc]; u != join; u = _parent[u]) {
          _flow[_pred[u]] += _forward[u] ? val : -val;
        }
      }
      // Update the state of the entering and leaving arcs
      if (change) {
        _state[in_arc] = STATE_TREE;
        _state[_pred[u_out]] =
          (_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER;
      } else {
        _state[in_arc] = -_state[in_arc];
      }
    }

    // Update the tree structure
    void updateTreeStructure() {
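      // Roughly speaking: the subtree that hung on the leaving arc is
      // re-rooted at u_in and attached to the rest of the tree through the
      // entering arc, and the parent/pred/thread/rev_thread/succ_num/
      // last_succ arrays are patched so that they again describe a valid
      // preorder threading of the spanning tree.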
      int u, w;
      int old_rev_thread = _rev_thread[u_out];
      int old_succ_num = _succ_num[u_out];
@@ -1234,179 +1249,167 @@
        _dirty_revs.push_back(u);

        // Remove the subtree of stem from the thread list
        w = _rev_thread[stem];
        _thread[w] = right;
        _rev_thread[right] = w;

        // Change the parent node and shift stem nodes
        _parent[stem] = par_stem;
        par_stem = stem;
        stem = new_stem;

        // Update u and right
        u = _last_succ[stem] == _last_succ[par_stem] ?
          _rev_thread[par_stem] : _last_succ[stem];
        right = _thread[u];
      }
      _parent[u_out] = par_stem;
      _thread[u] = last;
      _rev_thread[last] = u;
      _last_succ[u_out] = u;

      // Remove the subtree of u_out from the thread list except for
      // the case when old_rev_thread equals v_in
      // (it also means that join and v_out coincide)
      if (old_rev_thread != v_in) {
        _thread[old_rev_thread] = right;
        _rev_thread[right] = old_rev_thread;
      }

      // Update _rev_thread using the new _thread values
      for (int i = 0; i < int(_dirty_revs.size()); ++i) {
        u = _dirty_revs[i];
        _rev_thread[_thread[u]] = u;
      }

      // Update _pred, _forward, _last_succ and _succ_num for the
      // stem nodes from u_out to u_in
      int tmp_sc = 0, tmp_ls = _last_succ[u_out];
      u = u_out;
      while (u != u_in) {
        w = _parent[u];
        _pred[u] = _pred[w];
        _forward[u] = !_forward[w];
        tmp_sc += _succ_num[u] - _succ_num[w];
        _succ_num[u] = tmp_sc;
        _last_succ[w] = tmp_ls;
        u = w;
      }
      _pred[u_in] = in_arc;
      _forward[u_in] = (u_in == _source[in_arc]);
      _succ_num[u_in] = old_succ_num;

      // Set limits for updating _last_succ from v_in and v_out
      // towards the root
      int up_limit_in = -1;
      int up_limit_out = -1;
      if (_last_succ[join] == v_in) {
        up_limit_out = join;
      } else {
        up_limit_in = join;
      }

      // Update _last_succ from v_in towards the root
      for (u = v_in; u != up_limit_in && _last_succ[u] == v_in;
           u = _parent[u]) {
        _last_succ[u] = _last_succ[u_out];
      }
      // Update _last_succ from v_out towards the root
      if (join != old_rev_thread && v_in != old_rev_thread) {
        for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
             u = _parent[u]) {
          _last_succ[u] = old_rev_thread;
        }
      } else {
        for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
             u = _parent[u]) {
          _last_succ[u] = _last_succ[u_out];
        }
      }

      // Update _succ_num from v_in to join
      for (u = v_in; u != join; u = _parent[u]) {
        _succ_num[u] += old_succ_num;
      }
      // Update _succ_num from v_out to join
      for (u = v_out; u != join; u = _parent[u]) {
        _succ_num[u] -= old_succ_num;
      }
    }

    // Update potentials
    void updatePotential() {
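      // sigma is the amount that must be added to the potential of every
      // node in the subtree that has been re-hung (the subtree of u_in) so
      // that the entering arc, which is now a tree arc, gets zero reduced
      // cost; potentials outside this subtree are unchanged.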
      Cost sigma = _forward[u_in] ?
        _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] :
        _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]];
      // Update potentials in the subtree, which has been moved
      int end = _thread[_last_succ[u_in]];
      for (int u = u_in; u != end; u = _thread[u]) {
        _pi[u] += sigma;
      }
    }

    // Execute the algorithm
    bool start(PivotRule pivot_rule) {
      // Select the pivot rule implementation
      switch (pivot_rule) {
        case FIRST_ELIGIBLE:
          return start<FirstEligiblePivotRule>();
        case BEST_ELIGIBLE:
          return start<BestEligiblePivotRule>();
        case BLOCK_SEARCH:
          return start<BlockSearchPivotRule>();
        case CANDIDATE_LIST:
          return start<CandidateListPivotRule>();
        case ALTERING_LIST:
          return start<AlteringListPivotRule>();
      }
      return false;
    }

    template <typename PivotRuleImpl>
    bool start() {
      PivotRuleImpl pivot(*this);

      // Execute the Network Simplex algorithm
      while (pivot.findEnteringArc()) {
        findJoinNode();
        bool change = findLeavingArc();
        changeFlow(change);
        if (change) {
          updateTreeStructure();
          updatePotential();
        }
      }

      // Check if the flow amount equals zero on all the artificial arcs
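      // (a positive flow value on any artificial arc means that some
      // supply could not be routed in the original network, i.e. the
      // problem instance is infeasible)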
      for (int e = _arc_num; e != _arc_num + _node_num; ++e) {
        if (_flow[e] > 0) return false;
      }

      // Copy flow values to _flow_map
      if (_plower) {
        for (int i = 0; i != _arc_num; ++i) {
          Arc e = _arc_ref[i];
          _flow_map->set(e, (*_plower)[e] + _flow[i]);
        }
      } else {
        for (int i = 0; i != _arc_num; ++i) {
          _flow_map->set(_arc_ref[i], _flow[i]);
        }
      }
      // Copy potential values to _potential_map
      for (NodeIt n(_graph); n != INVALID; ++n) {
        _potential_map->set(n, _pi[_node_id[n]]);
      }

      return true;
    }

  }; //class NetworkSimplex

  ///@}

} //namespace lemon

#endif //LEMON_NETWORK_SIMPLEX_H