kpeter (Peter Kovacs) <kpeter@inf.elte.hu>
Use XTI implementation instead of ATI in NetworkSimplex (#234)

XTI (eXtended Threaded Index) is an improved version of the widely known ATI (Augmented Threaded Index) method for storing and updating the spanning tree structure in Network Simplex algorithms. In the ATI data structure, three indices are stored for each node: predecessor, thread and depth. In the XTI data structure, depth is replaced by the number of successors and the last successor (according to the thread index).
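For orientation, the per-node bookkeeping of the two schemes can be sketched as follows. This is illustrative only and not part of the patch: the diff below keeps these fields in parallel IntVector arrays (_parent, _pred, _thread, _rev_thread, _succ_num, _last_succ) rather than in per-node structs.

// ATI: each node stores its parent link, entering tree arc, thread
// successor and depth in the spanning tree.
struct AtiNode {
  int parent;   // parent node in the spanning tree
  int pred;     // arc connecting the node to its parent
  int thread;   // next node in the preorder thread list
  int depth;    // distance of the node from the artificial root
};

// XTI: depth is dropped; instead the subtree size and the last subtree
// node on the thread are maintained, together with reverse thread links.
struct XtiNode {
  int parent;     // parent node in the spanning tree
  int pred;       // arc connecting the node to its parent
  int thread;     // next node in the preorder thread list
  int rev_thread; // previous node in the thread list
  int succ_num;   // number of nodes in the subtree rooted at this node
  int last_succ;  // last node of this subtree in the thread list
};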
branch: default
1 file changed with 136 insertions and 68 deletions:
@@ -133,52 +133,55 @@
     // Result maps
     FlowMap *_flow_map;
     PotentialMap *_potential_map;
     bool _local_flow;
     bool _local_potential;

     // The number of nodes and arcs in the original graph
     int _node_num;
     int _arc_num;

     // Data structures for storing the graph
     IntNodeMap _node_id;
     ArcVector _arc_ref;
     IntVector _source;
     IntVector _target;

     // Node and arc maps
     CapacityVector _cap;
     CostVector _cost;
     CostVector _supply;
     CapacityVector _flow;
     CostVector _pi;

     // Data for storing the spanning tree structure
-    IntVector _depth;
     IntVector _parent;
     IntVector _pred;
     IntVector _thread;
+    IntVector _rev_thread;
+    IntVector _succ_num;
+    IntVector _last_succ;
+    IntVector _dirty_revs;
     BoolVector _forward;
     IntVector _state;
     int _root;

     // Temporary data used in the current pivot iteration
     int in_arc, join, u_in, v_in, u_out, v_out;
     int first, second, right, last;
     int stem, par_stem, new_stem;
     Capacity delta;

   private:

     /// \brief Implementation of the "First Eligible" pivot rule for the
     /// \ref NetworkSimplex "network simplex" algorithm.
     ///
     /// This class implements the "First Eligible" pivot rule
     /// for the \ref NetworkSimplex "network simplex" algorithm.
     ///
     /// For more information see \ref NetworkSimplex::run().
     class FirstEligiblePivotRule
     {
     private:

       // References to the NetworkSimplex class
@@ -829,149 +832,156 @@
         _flow_map = new FlowMap(_graph);
         _local_flow = true;
       }
       if (!_potential_map) {
         _potential_map = new PotentialMap(_graph);
         _local_potential = true;
       }

       // Initialize vectors
       _node_num = countNodes(_graph);
       _arc_num = countArcs(_graph);
       int all_node_num = _node_num + 1;
       int all_arc_num = _arc_num + _node_num;

       _arc_ref.resize(_arc_num);
       _source.resize(all_arc_num);
       _target.resize(all_arc_num);

       _cap.resize(all_arc_num);
       _cost.resize(all_arc_num);
       _supply.resize(all_node_num);
       _flow.resize(all_arc_num, 0);
       _pi.resize(all_node_num, 0);

-      _depth.resize(all_node_num);
       _parent.resize(all_node_num);
       _pred.resize(all_node_num);
       _forward.resize(all_node_num);
       _thread.resize(all_node_num);
+      _rev_thread.resize(all_node_num);
+      _succ_num.resize(all_node_num);
+      _last_succ.resize(all_node_num);
       _state.resize(all_arc_num, STATE_LOWER);

       // Initialize node related data
       bool valid_supply = true;
       if (_orig_supply) {
         Supply sum = 0;
         int i = 0;
         for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
           _node_id[n] = i;
           _supply[i] = (*_orig_supply)[n];
           sum += _supply[i];
         }
         valid_supply = (sum == 0);
       } else {
         int i = 0;
         for (NodeIt n(_graph); n != INVALID; ++n, ++i) {
           _node_id[n] = i;
           _supply[i] = 0;
         }
         _supply[_node_id[_orig_source]] =  _orig_flow_value;
         _supply[_node_id[_orig_target]] = -_orig_flow_value;
       }
       if (!valid_supply) return false;

       // Set data for the artificial root node
       _root = _node_num;
-      _depth[_root] = 0;
       _parent[_root] = -1;
       _pred[_root] = -1;
       _thread[_root] = 0;
+      _rev_thread[0] = _root;
+      _succ_num[_root] = all_node_num;
+      _last_succ[_root] = _root - 1;
       _supply[_root] = 0;
       _pi[_root] = 0;

       // Store the arcs in a mixed order
       int k = std::max(int(sqrt(_arc_num)), 10);
       int i = 0;
       for (ArcIt e(_graph); e != INVALID; ++e) {
         _arc_ref[i] = e;
         if ((i += k) >= _arc_num) i = (i % k) + 1;
       }

       // Initialize arc maps
       for (int i = 0; i != _arc_num; ++i) {
         Arc e = _arc_ref[i];
         _source[i] = _node_id[_graph.source(e)];
         _target[i] = _node_id[_graph.target(e)];
         _cost[i] = _orig_cost[e];
         _cap[i] = _orig_cap[e];
       }

       // Remove non-zero lower bounds
       if (_orig_lower) {
         for (int i = 0; i != _arc_num; ++i) {
           Capacity c = (*_orig_lower)[_arc_ref[i]];
           if (c != 0) {
             _cap[i] -= c;
             _supply[_source[i]] -= c;
             _supply[_target[i]] += c;
           }
         }
       }

       // Add artificial arcs and initialize the spanning tree data structure
       Cost max_cost = std::numeric_limits<Cost>::max() / 4;
       Capacity max_cap = std::numeric_limits<Capacity>::max();
       for (int u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
         _thread[u] = u + 1;
-        _depth[u] = 1;
+        _rev_thread[u + 1] = u;
+        _succ_num[u] = 1;
+        _last_succ[u] = u;
         _parent[u] = _root;
         _pred[u] = e;
         if (_supply[u] >= 0) {
           _flow[e] = _supply[u];
           _forward[u] = true;
           _pi[u] = -max_cost;
         } else {
           _flow[e] = -_supply[u];
           _forward[u] = false;
           _pi[u] = max_cost;
         }
         _cost[e] = max_cost;
         _cap[e] = max_cap;
         _state[e] = STATE_TREE;
       }

       return true;
     }

     // Find the join node
     void findJoinNode() {
       int u = _source[in_arc];
       int v = _target[in_arc];
-      while (_depth[u] > _depth[v]) u = _parent[u];
-      while (_depth[v] > _depth[u]) v = _parent[v];
       while (u != v) {
-        u = _parent[u];
-        v = _parent[v];
+        if (_succ_num[u] < _succ_num[v]) {
+          u = _parent[u];
+        } else {
+          v = _parent[v];
+        }
       }
       join = u;
     }

     // Find the leaving arc of the cycle and returns true if the
     // leaving arc is not the same as the entering arc
     bool findLeavingArc() {
       // Initialize first and second nodes according to the direction
       // of the cycle
       if (_state[in_arc] == STATE_LOWER) {
         first  = _source[in_arc];
         second = _target[in_arc];
       } else {
         first  = _target[in_arc];
         second = _source[in_arc];
       }
       delta = _cap[in_arc];
       int result = 0;
       Capacity d;
       int e;

       // Search the cycle along the path from the first node to the root
       for (int u = first; u != join; u = _parent[u]) {
         e = _pred[u];
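The new findJoinNode() above no longer needs depth values: of two distinct nodes, the one with the strictly smaller subtree cannot be an ancestor of the other, so that side can always be stepped towards the root until the two walks meet. A minimal free-standing sketch of this search (array names are illustrative; the patch itself uses the member vectors _parent and _succ_num):

#include <vector>

// Return the common ancestor (join node) of u and v in a rooted spanning
// tree given by parent links and subtree sizes (succ_num).
int findJoin(const std::vector<int>& parent,
             const std::vector<int>& succ_num,
             int u, int v) {
  while (u != v) {
    // The node with the smaller subtree cannot be an ancestor of the
    // other one, so it is safe to move it one step up.
    if (succ_num[u] < succ_num[v]) u = parent[u];
    else v = parent[v];
  }
  return u;
}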
@@ -1005,164 +1015,222 @@

     // Change _flow and _state vectors
     void changeFlow(bool change) {
       // Augment along the cycle
       if (delta > 0) {
         Capacity val = _state[in_arc] * delta;
         _flow[in_arc] += val;
         for (int u = _source[in_arc]; u != join; u = _parent[u]) {
           _flow[_pred[u]] += _forward[u] ? -val : val;
         }
         for (int u = _target[in_arc]; u != join; u = _parent[u]) {
           _flow[_pred[u]] += _forward[u] ? val : -val;
         }
       }
       // Update the state of the entering and leaving arcs
       if (change) {
         _state[in_arc] = STATE_TREE;
         _state[_pred[u_out]] =
           (_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER;
       } else {
         _state[in_arc] = -_state[in_arc];
       }
     }

-    // Update _thread and _parent vectors
-    void updateThreadParent() {
-      int u;
+    // Update the tree structure
+    void updateTreeStructure() {
+      int u, w;
+      int old_rev_thread = _rev_thread[u_out];
+      int old_succ_num = _succ_num[u_out];
+      int old_last_succ = _last_succ[u_out];
       v_out = _parent[u_out];

-      // Handle the case when join and v_out coincide
-      bool par_first = false;
-      if (join == v_out) {
-        for (u = join; u != u_in && u != v_in; u = _thread[u]) ;
-        if (u == v_in) {
-          par_first = true;
-          while (_thread[u] != u_out) u = _thread[u];
-          first = u;
-        }
+      u = _last_succ[u_in];  // the last successor of u_in
+      right = _thread[u];    // the node after it
+
+      // Handle the case when old_rev_thread equals v_in
+      // (it also means that join and v_out coincide)
+      if (old_rev_thread == v_in) {
+        last = _thread[_last_succ[u_out]];
+      } else {
+        last = _thread[v_in];
       }

-      // Find the last successor of u_in (u) and the node after it (right)
-      // according to the thread index
-      for (u = u_in; _depth[_thread[u]] > _depth[u_in]; u = _thread[u]) ;
-      right = _thread[u];
-      if (_thread[v_in] == u_out) {
-        for (last = u; _depth[last] > _depth[u_out]; last = _thread[last]) ;
-        if (last == u_out) last = _thread[last];
-      }
-      else last = _thread[v_in];
-
-      // Update stem nodes
+      // Update _thread and _parent along the stem nodes (i.e. the nodes
+      // between u_in and u_out, whose parents have to be changed)
       _thread[v_in] = stem = u_in;
+      _dirty_revs.clear();
+      _dirty_revs.push_back(v_in);
       par_stem = v_in;
       while (stem != u_out) {
-        _thread[u] = new_stem = _parent[stem];
+        // Insert the next stem node into the thread list
+        new_stem = _parent[stem];
+        _thread[u] = new_stem;
+        _dirty_revs.push_back(u);

-        // Find the node just before the stem node (u) according to
-        // the original thread index
-        for (u = new_stem; _thread[u] != stem; u = _thread[u]) ;
-        _thread[u] = right;
+        // Remove the subtree of stem from the thread list
+        w = _rev_thread[stem];
+        _thread[w] = right;
+        _rev_thread[right] = w;

-        // Change the parent node of stem and shift stem and par_stem nodes
+        // Change the parent node and shift stem nodes
         _parent[stem] = par_stem;
         par_stem = stem;
         stem = new_stem;

-        // Find the last successor of stem (u) and the node after it (right)
-        // according to the thread index
-        for (u = stem; _depth[_thread[u]] > _depth[stem]; u = _thread[u]) ;
+        // Update u and right
+        u = _last_succ[stem] == _last_succ[par_stem] ?
+          _rev_thread[par_stem] : _last_succ[stem];
         right = _thread[u];
       }
       _parent[u_out] = par_stem;
       _thread[u] = last;
+      _rev_thread[last] = u;
+      _last_succ[u_out] = u;

-      if (join == v_out && par_first) {
-        if (first != v_in) _thread[first] = right;
+      // Remove the subtree of u_out from the thread list except for
+      // the case when old_rev_thread equals v_in
+      // (it also means that join and v_out coincide)
+      if (old_rev_thread != v_in) {
+        _thread[old_rev_thread] = right;
+        _rev_thread[right] = old_rev_thread;
+      }
+
+      // Update _rev_thread using the new _thread values
+      for (int i = 0; i < int(_dirty_revs.size()); ++i) {
+        u = _dirty_revs[i];
+        _rev_thread[_thread[u]] = u;
+      }
+
+      // Update _pred, _forward, _last_succ and _succ_num for the
+      // stem nodes from u_out to u_in
+      int tmp_sc = 0, tmp_ls = _last_succ[u_out];
+      u = u_out;
+      while (u != u_in) {
+        w = _parent[u];
+        _pred[u] = _pred[w];
+        _forward[u] = !_forward[w];
+        tmp_sc += _succ_num[u] - _succ_num[w];
+        _succ_num[u] = tmp_sc;
+        _last_succ[w] = tmp_ls;
+        u = w;
+      }
+      _pred[u_in] = in_arc;
+      _forward[u_in] = (u_in == _source[in_arc]);
+      _succ_num[u_in] = old_succ_num;
+
+      // Set limits for updating _last_succ from v_in and v_out
+      // towards the root
+      int up_limit_in = -1;
+      int up_limit_out = -1;
+      if (_last_succ[join] == v_in) {
+        up_limit_out = join;
       } else {
-        for (u = v_out; _thread[u] != u_out; u = _thread[u]) ;
-        _thread[u] = right;
+        up_limit_in = join;
+      }
+
+      // Update _last_succ from v_in towards the root
+      for (u = v_in; u != up_limit_in && _last_succ[u] == v_in;
+           u = _parent[u]) {
+        _last_succ[u] = _last_succ[u_out];
+      }
+      // Update _last_succ from v_out towards the root
+      if (join != old_rev_thread && v_in != old_rev_thread) {
+        for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
+             u = _parent[u]) {
+          _last_succ[u] = old_rev_thread;
+        }
+      } else {
+        for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
+             u = _parent[u]) {
+          _last_succ[u] = _last_succ[u_out];
+        }
+      }
+
+      // Update _succ_num from v_in to join
+      for (u = v_in; u != join; u = _parent[u]) {
+        _succ_num[u] += old_succ_num;
+      }
+      // Update _succ_num from v_out to join
+      for (u = v_out; u != join; u = _parent[u]) {
+        _succ_num[u] -= old_succ_num;
       }
     }

-    // Update _pred and _forward vectors
-    void updatePredArc() {
-      int u = u_out, v;
-      while (u != u_in) {
-        v = _parent[u];
-        _pred[u] = _pred[v];
-        _forward[u] = !_forward[v];
-        u = v;
-      }
-      _pred[u_in] = in_arc;
-      _forward[u_in] = (u_in == _source[in_arc]);
-    }
-
-    // Update _depth and _potential vectors
-    void updateDepthPotential() {
-      _depth[u_in] = _depth[v_in] + 1;
+    // Update potentials
+    void updatePotential() {
       Cost sigma = _forward[u_in] ?
         _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] :
         _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]];
-      _pi[u_in] += sigma;
-      for(int u = _thread[u_in]; _parent[u] != -1; u = _thread[u]) {
-        _depth[u] = _depth[_parent[u]] + 1;
-        if (_depth[u] <= _depth[u_in]) break;
-        _pi[u] += sigma;
+      if (_succ_num[u_in] > _node_num / 2) {
+        // Update in the upper subtree (which contains the root)
+        int before = _rev_thread[u_in];
+        int after = _thread[_last_succ[u_in]];
+        _thread[before] = after;
+        _pi[_root] -= sigma;
+        for (int u = _thread[_root]; u != _root; u = _thread[u]) {
+          _pi[u] -= sigma;
+        }
+        _thread[before] = u_in;
+      } else {
+        // Update in the lower subtree (which has been moved)
+        int end = _thread[_last_succ[u_in]];
+        for (int u = u_in; u != end; u = _thread[u]) {
+          _pi[u] += sigma;
+        }
       }
     }

     // Execute the algorithm
     bool start(PivotRuleEnum pivot_rule) {
       // Select the pivot rule implementation
       switch (pivot_rule) {
         case FIRST_ELIGIBLE_PIVOT:
           return start<FirstEligiblePivotRule>();
         case BEST_ELIGIBLE_PIVOT:
           return start<BestEligiblePivotRule>();
         case BLOCK_SEARCH_PIVOT:
           return start<BlockSearchPivotRule>();
         case CANDIDATE_LIST_PIVOT:
           return start<CandidateListPivotRule>();
         case ALTERING_LIST_PIVOT:
           return start<AlteringListPivotRule>();
       }
       return false;
     }

     template<class PivotRuleImplementation>
     bool start() {
       PivotRuleImplementation pivot(*this);

       // Execute the network simplex algorithm
       while (pivot.findEnteringArc()) {
         findJoinNode();
         bool change = findLeavingArc();
         changeFlow(change);
         if (change) {
-          updateThreadParent();
-          updatePredArc();
-          updateDepthPotential();
+          updateTreeStructure();
+          updatePotential();
         }
       }

       // Check if the flow amount equals zero on all the artificial arcs
       for (int e = _arc_num; e != _arc_num + _node_num; ++e) {
         if (_flow[e] > 0) return false;
       }

       // Copy flow values to _flow_map
       if (_orig_lower) {
         for (int i = 0; i != _arc_num; ++i) {
           Arc e = _arc_ref[i];
           _flow_map->set(e, (*_orig_lower)[e] + _flow[i]);
         }
       } else {
         for (int i = 0; i != _arc_num; ++i) {
           _flow_map->set(_arc_ref[i], _flow[i]);
         }
       }
       // Copy potential values to _potential_map
       for (NodeIt n(_graph); n != INVALID; ++n) {
         _potential_map->set(n, _pi[_node_id[n]]);
       }
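A property of the XTI indices that the new updateTreeStructure() and updatePotential() code above relies on: the subtree of a node u is exactly the contiguous thread segment from u up to (but not including) _thread[_last_succ[u]], so it can be traversed without any depth comparisons. A minimal sketch under that assumption (names are illustrative; it does not apply to the artificial root, whose subtree is the whole circular thread list):

#include <vector>

// Visit every node in the subtree rooted at u, using only the thread and
// last_succ indices of the XTI representation (u must not be the root).
template <typename Visitor>
void forEachInSubtree(const std::vector<int>& thread,
                      const std::vector<int>& last_succ,
                      int u, Visitor visit) {
  int stop = thread[last_succ[u]];   // first node after u's subtree
  for (int v = u; v != stop; v = thread[v]) visit(v);
}

This is the same traversal pattern updatePotential() uses to shift the potentials of the re-hung subtree by sigma.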