/* -*- mode: C++; indent-tabs-mode: nil; -*-
 *
 * This file is a part of LEMON, a generic C++ optimization library.
 *
 * Copyright (C) 2003-2009
 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
 * (Egervary Research Group on Combinatorial Optimization, EGRES).
 *
 * Permission to use, modify and distribute this software is granted
 * provided that this copyright notice appears in all copies. For
 * precise terms see the accompanying LICENSE file.
 *
 * This software is provided "AS IS" with no warranty of any kind,
 * express or implied, and with no claim as to its suitability for any
 * purpose.
 *
 */

/// \ingroup demos
/// \file
/// \brief Demo of the graph drawing function \ref graphToEps()
///
/// This demo program shows examples of how to use the function \ref
/// graphToEps(). It takes no input but simply creates seven
/// <tt>.eps</tt> files demonstrating the capabilities of \ref
/// graphToEps(): how to draw directed graphs,
/// how to handle parallel edges, and how to change the properties (like
/// color, shape, size, title etc.) of nodes and arcs individually
/// using appropriate graph maps.
///
/// \include graph_to_eps_demo.cc

#include<lemon/list_graph.h>
#include<lemon/graph_to_eps.h>
#include<lemon/math.h>

using namespace std;
using namespace lemon;

int main()
{
  Palette palette;
  Palette paletteW(true);

  // Create a small digraph
  ListDigraph g;
  typedef ListDigraph::Node Node;
  typedef ListDigraph::NodeIt NodeIt;
  typedef ListDigraph::Arc Arc;
  typedef dim2::Point<int> Point;

  Node n1=g.addNode();
  Node n2=g.addNode();
  Node n3=g.addNode();
  Node n4=g.addNode();
  Node n5=g.addNode();

  ListDigraph::NodeMap<Point> coords(g);
  ListDigraph::NodeMap<double> sizes(g);
  ListDigraph::NodeMap<int> colors(g);
  ListDigraph::NodeMap<int> shapes(g);
  ListDigraph::ArcMap<int> acolors(g);
  ListDigraph::ArcMap<int> widths(g);

  coords[n1]=Point(50,50);  sizes[n1]=1; colors[n1]=1; shapes[n1]=0;
  coords[n2]=Point(50,70);  sizes[n2]=2; colors[n2]=2; shapes[n2]=2;
  coords[n3]=Point(70,70);  sizes[n3]=1; colors[n3]=3; shapes[n3]=0;
  coords[n4]=Point(70,50);  sizes[n4]=2; colors[n4]=4; shapes[n4]=1;
  coords[n5]=Point(85,60);  sizes[n5]=3; colors[n5]=5; shapes[n5]=2;

  Arc a;

  a=g.addArc(n1,n2); acolors[a]=0; widths[a]=1;
  a=g.addArc(n2,n3); acolors[a]=0; widths[a]=1;
  a=g.addArc(n3,n5); acolors[a]=0; widths[a]=3;
  a=g.addArc(n5,n4); acolors[a]=0; widths[a]=1;
  a=g.addArc(n4,n1); acolors[a]=0; widths[a]=1;
  a=g.addArc(n2,n4); acolors[a]=1; widths[a]=2;
  a=g.addArc(n3,n4); acolors[a]=2; widths[a]=1;

  IdMap<ListDigraph,Node> id(g);

  // Create .eps files showing the digraph with different options
  cout << "Create 'graph_to_eps_demo_out_1_pure.eps'" << endl;
  graphToEps(g,"graph_to_eps_demo_out_1_pure.eps").
    coords(coords).
    title("Sample .eps figure").
    copyright("(C) 2003-2009 LEMON Project").
    run();

  cout << "Create 'graph_to_eps_demo_out_2.eps'" << endl;
  graphToEps(g,"graph_to_eps_demo_out_2.eps").
    coords(coords).
    title("Sample .eps figure").
    copyright("(C) 2003-2009 LEMON Project").
    absoluteNodeSizes().absoluteArcWidths().
    nodeScale(2).nodeSizes(sizes).
    nodeShapes(shapes).
    nodeColors(composeMap(palette,colors)).
    arcColors(composeMap(palette,acolors)).
    arcWidthScale(.4).arcWidths(widths).
    nodeTexts(id).nodeTextSize(3).
    run();

  cout << "Create 'graph_to_eps_demo_out_3_arr.eps'" << endl;
  graphToEps(g,"graph_to_eps_demo_out_3_arr.eps").
    title("Sample .eps figure (with arrowheads)").
    copyright("(C) 2003-2009 LEMON Project").
    absoluteNodeSizes().absoluteArcWidths().
    nodeColors(composeMap(palette,colors)).
    coords(coords).
    nodeScale(2).nodeSizes(sizes).
    nodeShapes(shapes).
    arcColors(composeMap(palette,acolors)).
    arcWidthScale(.4).arcWidths(widths).
    nodeTexts(id).nodeTextSize(3).
    drawArrows().arrowWidth(2).arrowLength(2).
    run();

  // Add more arcs to the digraph
  a=g.addArc(n1,n4); acolors[a]=2; widths[a]=1;
  a=g.addArc(n4,n1); acolors[a]=1; widths[a]=2;

  a=g.addArc(n1,n2); acolors[a]=1; widths[a]=1;
  a=g.addArc(n1,n2); acolors[a]=2; widths[a]=1;
  a=g.addArc(n1,n2); acolors[a]=3; widths[a]=1;
  a=g.addArc(n1,n2); acolors[a]=4; widths[a]=1;
  a=g.addArc(n1,n2); acolors[a]=5; widths[a]=1;
  a=g.addArc(n1,n2); acolors[a]=6; widths[a]=1;
  a=g.addArc(n1,n2); acolors[a]=7; widths[a]=1;

  cout << "Create 'graph_to_eps_demo_out_4_par.eps'" << endl;
  graphToEps(g,"graph_to_eps_demo_out_4_par.eps").
    title("Sample .eps figure (parallel arcs)").
    copyright("(C) 2003-2009 LEMON Project").
    absoluteNodeSizes().absoluteArcWidths().
    nodeShapes(shapes).
    coords(coords).
    nodeScale(2).nodeSizes(sizes).
    nodeColors(composeMap(palette,colors)).
    arcColors(composeMap(palette,acolors)).
    arcWidthScale(.4).arcWidths(widths).
    nodeTexts(id).nodeTextSize(3).
    enableParallel().parArcDist(1.5).
    run();

  cout << "Create 'graph_to_eps_demo_out_5_par_arr.eps'" << endl;
  graphToEps(g,"graph_to_eps_demo_out_5_par_arr.eps").
    title("Sample .eps figure (parallel arcs and arrowheads)").
    copyright("(C) 2003-2009 LEMON Project").
    absoluteNodeSizes().absoluteArcWidths().
    nodeScale(2).nodeSizes(sizes).
    coords(coords).
    nodeShapes(shapes).
    nodeColors(composeMap(palette,colors)).
    arcColors(composeMap(palette,acolors)).
    arcWidthScale(.3).arcWidths(widths).
    nodeTexts(id).nodeTextSize(3).
    enableParallel().parArcDist(1).
    drawArrows().arrowWidth(1).arrowLength(1).
    run();

  cout << "Create 'graph_to_eps_demo_out_6_par_arr_a4.eps'" << endl;
  graphToEps(g,"graph_to_eps_demo_out_6_par_arr_a4.eps").
    title("Sample .eps figure (fits to A4)").
    copyright("(C) 2003-2009 LEMON Project").
    scaleToA4().
    absoluteNodeSizes().absoluteArcWidths().
    nodeScale(2).nodeSizes(sizes).
    coords(coords).
    nodeShapes(shapes).
    nodeColors(composeMap(palette,colors)).
    arcColors(composeMap(palette,acolors)).
    arcWidthScale(.3).arcWidths(widths).
    nodeTexts(id).nodeTextSize(3).
    enableParallel().parArcDist(1).
    drawArrows().arrowWidth(1).arrowLength(1).
    run();

  // Create an .eps file showing the colors of a default Palette
  ListDigraph h;
  ListDigraph::NodeMap<int> hcolors(h);
  ListDigraph::NodeMap<Point> hcoords(h);

  int cols=int(std::sqrt(double(palette.size())));
  for(int i=0;i<int(paletteW.size());i++) {
    Node n=h.addNode();
    hcoords[n]=Point(1+i%cols,1+i/cols);
    hcolors[n]=i;
  }

  cout << "Create 'graph_to_eps_demo_out_7_colors.eps'" << endl;
  graphToEps(h,"graph_to_eps_demo_out_7_colors.eps").
    scale(60).
    title("Sample .eps figure (Palette demo)").
    copyright("(C) 2003-2009 LEMON Project").
    coords(hcoords).
    absoluteNodeSizes().absoluteArcWidths().
    nodeScale(.45).
    distantColorNodeTexts().
    nodeTexts(hcolors).nodeTextSize(.6).
    nodeColors(composeMap(paletteW,hcolors)).
    run();

  return 0;
}
/* -*- mode: C++; indent-tabs-mode: nil; -*-
 *
 * This file is a part of LEMON, a generic C++ optimization library.
 *
 * Copyright (C) 2003-2009
 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
 * (Egervary Research Group on Combinatorial Optimization, EGRES).
 *
 * Permission to use, modify and distribute this software is granted
 * provided that this copyright notice appears in all copies. For
 * precise terms see the accompanying LICENSE file.
 *
 * This software is provided "AS IS" with no warranty of any kind,
 * express or implied, and with no claim as to its suitability for any
 * purpose.
 *
 */

#ifndef LEMON_NETWORK_SIMPLEX_H
#define LEMON_NETWORK_SIMPLEX_H

/// \ingroup min_cost_flow
///
/// \file
/// \brief Network Simplex algorithm for finding a minimum cost flow.

#include <vector>
#include <limits>
#include <algorithm>

#include <lemon/core.h>
#include <lemon/math.h>
#include <lemon/maps.h>
#include <lemon/circulation.h>
#include <lemon/adaptors.h>

namespace lemon {

  /// \addtogroup min_cost_flow
  /// @{

  /// \brief Implementation of the primal Network Simplex algorithm
  /// for finding a \ref min_cost_flow "minimum cost flow".
  ///
  /// \ref NetworkSimplex implements the primal Network Simplex algorithm
  /// for finding a \ref min_cost_flow "minimum cost flow".
  /// This algorithm is a specialized version of the linear programming
  /// simplex method, tailored directly to the minimum cost flow problem.
  /// It is one of the most efficient solution methods.
  ///
  /// In general, this class is the fastest implementation available
  /// in LEMON for the minimum cost flow problem.
  /// Moreover, it supports both directions of the supply/demand inequality
  /// constraints. For more information, see \ref ProblemType.
  ///
  /// \tparam GR The digraph type the algorithm runs on.
  /// \tparam F The value type used for flow amounts, capacity bounds
  /// and supply values in the algorithm. By default it is \c int.
  /// \tparam C The value type used for costs and potentials in the
  /// algorithm. By default it is the same as \c F.
  ///
  /// \warning Both value types must be signed and all input data must
  /// be integer.
  ///
  /// \note %NetworkSimplex provides five different pivot rule
  /// implementations, from which the most efficient one is used
  /// by default. For more information, see \ref PivotRule.
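  ///
  /// A minimal usage sketch (the digraph \c g and the \c cap, \c cost and
  /// \c sup maps are assumed to be supplied by the caller):
  /// \code
  ///   NetworkSimplex<ListDigraph> ns(g);
  ///   bool feasible = ns.capacityMap(cap).costMap(cost).supplyMap(sup).run();
  ///   if (feasible) std::cout << "Total cost: " << ns.totalCost() << std::endl;
  /// \endcode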
  template <typename GR, typename F = int, typename C = F>
  class NetworkSimplex
  {
  public:

    /// The flow type of the algorithm
    typedef F Flow;
    /// The cost type of the algorithm
    typedef C Cost;
#ifdef DOXYGEN
    /// The type of the flow map
    typedef GR::ArcMap<Flow> FlowMap;
    /// The type of the potential map
    typedef GR::NodeMap<Cost> PotentialMap;
#else
    /// The type of the flow map
    typedef typename GR::template ArcMap<Flow> FlowMap;
    /// The type of the potential map
    typedef typename GR::template NodeMap<Cost> PotentialMap;
#endif

  public:

    /// \brief Enum type for selecting the pivot rule.
    ///
    /// Enum type for selecting the pivot rule for the \ref run()
    /// function.
    ///
    /// \ref NetworkSimplex provides five different pivot rule
    /// implementations that significantly affect the running time
    /// of the algorithm.
    /// By default, \ref BLOCK_SEARCH "Block Search" is used, which
    /// has proved to be the most efficient and the most robust on various
    /// test inputs according to our benchmark tests.
    /// However, another pivot rule can be selected using the \ref run()
    /// function with the proper parameter.
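    /// For example, a non-default rule might be requested like this
    /// (a sketch; \c g denotes a digraph provided by the caller):
    /// \code
    ///   NetworkSimplex<ListDigraph> ns(g);
    ///   ns.run(NetworkSimplex<ListDigraph>::FIRST_ELIGIBLE);
    /// \endcode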
    enum PivotRule {

      /// The First Eligible pivot rule.
      /// The next eligible arc is selected in a wraparound fashion
      /// in every iteration.
      FIRST_ELIGIBLE,

      /// The Best Eligible pivot rule.
      /// The best eligible arc is selected in every iteration.
      BEST_ELIGIBLE,

      /// The Block Search pivot rule.
      /// A specified number of arcs are examined in every iteration
      /// in a wraparound fashion and the best eligible arc is selected
      /// from this block.
      BLOCK_SEARCH,

      /// The Candidate List pivot rule.
      /// In a major iteration a candidate list is built from eligible arcs
      /// in a wraparound fashion, and in the following minor iterations
      /// the best eligible arc is selected from this list.
      CANDIDATE_LIST,

      /// The Altering Candidate List pivot rule.
      /// It is a modified version of the Candidate List method.
      /// It keeps only the best few eligible arcs from the former
      /// candidate list and extends this list in every iteration.
      ALTERING_LIST
    };

    /// \brief Enum type for selecting the problem type.
    ///
    /// Enum type for selecting the problem type, i.e. the direction of
    /// the inequalities in the supply/demand constraints of the
    /// \ref min_cost_flow "minimum cost flow problem".
    ///
    /// The default problem type is \c GEQ, since this form is supported
    /// by other minimum cost flow algorithms and the \ref Circulation
    /// algorithm as well.
    /// The \c LEQ problem type can be selected using the \ref problemType()
    /// function.
    ///
    /// Note that the equality form is a special case of both problem types.
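    /// For example, the \c LEQ form might be requested like this (a sketch;
    /// \c ns is a previously constructed %NetworkSimplex instance):
    /// \code
    ///   ns.problemType(NetworkSimplex<ListDigraph>::LEQ);
    /// \endcode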
    enum ProblemType {

      /// This option means that there are "<em>greater than or equal to</em>"
      /// constraints in the definition, i.e. the exact formulation of the
      /// problem is the following.
      /**
          \f[ \min\sum_{uv\in A} f(uv) \cdot cost(uv) \f]
          \f[ \sum_{uv\in A} f(uv) - \sum_{vu\in A} f(vu) \geq
              sup(u) \quad \forall u\in V \f]
          \f[ lower(uv) \leq f(uv) \leq upper(uv) \quad \forall uv\in A \f]
      */
      /// It means that the total demand must be greater than or equal to the
      /// total supply (i.e. \f$\sum_{u\in V} sup(u)\f$ must be zero or
      /// negative) and all the supplies have to be carried out from
      /// the supply nodes, but there could be demands that are not
      /// satisfied.
      GEQ,
      /// It is just an alias for the \c GEQ option.
      CARRY_SUPPLIES = GEQ,

      /// This option means that there are "<em>less than or equal to</em>"
      /// constraints in the definition, i.e. the exact formulation of the
      /// problem is the following.
      /**
          \f[ \min\sum_{uv\in A} f(uv) \cdot cost(uv) \f]
          \f[ \sum_{uv\in A} f(uv) - \sum_{vu\in A} f(vu) \leq
              sup(u) \quad \forall u\in V \f]
          \f[ lower(uv) \leq f(uv) \leq upper(uv) \quad \forall uv\in A \f]
      */
      /// It means that the total demand must be less than or equal to the
      /// total supply (i.e. \f$\sum_{u\in V} sup(u)\f$ must be zero or
      /// positive) and all the demands have to be satisfied, but there
      /// could be supplies that are not carried out from the supply
      /// nodes.
      LEQ,
      /// It is just an alias for the \c LEQ option.
      SATISFY_DEMANDS = LEQ
    };

  private:

    TEMPLATE_DIGRAPH_TYPEDEFS(GR);

    typedef typename GR::template ArcMap<Flow> FlowArcMap;
    typedef typename GR::template ArcMap<Cost> CostArcMap;
    typedef typename GR::template NodeMap<Flow> FlowNodeMap;

    typedef std::vector<Arc> ArcVector;
    typedef std::vector<Node> NodeVector;
    typedef std::vector<int> IntVector;
    typedef std::vector<bool> BoolVector;
    typedef std::vector<Flow> FlowVector;
    typedef std::vector<Cost> CostVector;

    // State constants for arcs
    enum ArcStateEnum {
      STATE_UPPER = -1,
      STATE_TREE  =  0,
      STATE_LOWER =  1
    };
207 | 207 |
|
208 | 208 |
private: |
209 | 209 |
|
210 | 210 |
// Data related to the underlying digraph |
211 | 211 |
const GR &_graph; |
212 | 212 |
int _node_num; |
213 | 213 |
int _arc_num; |
214 | 214 |
|
215 | 215 |
// Parameters of the problem |
216 | 216 |
FlowArcMap *_plower; |
217 | 217 |
FlowArcMap *_pupper; |
218 | 218 |
CostArcMap *_pcost; |
219 | 219 |
FlowNodeMap *_psupply; |
220 | 220 |
bool _pstsup; |
221 | 221 |
Node _psource, _ptarget; |
222 | 222 |
Flow _pstflow; |
223 | 223 |
ProblemType _ptype; |
224 | 224 |
|
225 | 225 |
// Result maps |
226 | 226 |
FlowMap *_flow_map; |
227 | 227 |
PotentialMap *_potential_map; |
228 | 228 |
bool _local_flow; |
229 | 229 |
bool _local_potential; |
230 | 230 |
|
231 | 231 |
// Data structures for storing the digraph |
232 | 232 |
IntNodeMap _node_id; |
233 | 233 |
ArcVector _arc_ref; |
234 | 234 |
IntVector _source; |
235 | 235 |
IntVector _target; |
236 | 236 |
|
237 | 237 |
// Node and arc data |
238 | 238 |
FlowVector _cap; |
239 | 239 |
CostVector _cost; |
240 | 240 |
FlowVector _supply; |
241 | 241 |
FlowVector _flow; |
242 | 242 |
CostVector _pi; |
243 | 243 |
|
244 | 244 |
// Data for storing the spanning tree structure |
245 | 245 |
IntVector _parent; |
246 | 246 |
IntVector _pred; |
247 | 247 |
IntVector _thread; |
248 | 248 |
IntVector _rev_thread; |
249 | 249 |
IntVector _succ_num; |
250 | 250 |
IntVector _last_succ; |
251 | 251 |
IntVector _dirty_revs; |
252 | 252 |
BoolVector _forward; |
253 | 253 |
IntVector _state; |
254 | 254 |
int _root; |
255 | 255 |
|
256 | 256 |
// Temporary data used in the current pivot iteration |
257 | 257 |
int in_arc, join, u_in, v_in, u_out, v_out; |
258 | 258 |
int first, second, right, last; |
259 | 259 |
int stem, par_stem, new_stem; |
260 | 260 |
Flow delta; |
261 | 261 |
|
262 | 262 |
private: |
263 | 263 |
|
264 | 264 |
// Implementation of the First Eligible pivot rule |
265 | 265 |
class FirstEligiblePivotRule |
266 | 266 |
{ |
267 | 267 |
private: |
268 | 268 |
|
269 | 269 |
// References to the NetworkSimplex class |
270 | 270 |
const IntVector &_source; |
271 | 271 |
const IntVector &_target; |
272 | 272 |
const CostVector &_cost; |
273 | 273 |
const IntVector &_state; |
274 | 274 |
const CostVector &_pi; |
275 | 275 |
int &_in_arc; |
276 | 276 |
int _arc_num; |
277 | 277 |
|
278 | 278 |
// Pivot rule data |
279 | 279 |
int _next_arc; |
280 | 280 |
|
281 | 281 |
public: |
282 | 282 |
|
283 | 283 |
// Constructor |
284 | 284 |
FirstEligiblePivotRule(NetworkSimplex &ns) : |
285 | 285 |
_source(ns._source), _target(ns._target), |
286 | 286 |
_cost(ns._cost), _state(ns._state), _pi(ns._pi), |
287 | 287 |
_in_arc(ns.in_arc), _arc_num(ns._arc_num), _next_arc(0) |
288 | 288 |
{} |
289 | 289 |
|
290 | 290 |
// Find next entering arc |
291 | 291 |
bool findEnteringArc() { |
292 | 292 |
Cost c; |
293 | 293 |
for (int e = _next_arc; e < _arc_num; ++e) { |
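          // c is the state-signed reduced cost of arc e; a negative value
          // means that the arc is eligible to enter the spanning tree basis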
294 | 294 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
295 | 295 |
if (c < 0) { |
296 | 296 |
_in_arc = e; |
297 | 297 |
_next_arc = e + 1; |
298 | 298 |
return true; |
299 | 299 |
} |
300 | 300 |
} |
301 | 301 |
for (int e = 0; e < _next_arc; ++e) { |
302 | 302 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
303 | 303 |
if (c < 0) { |
304 | 304 |
_in_arc = e; |
305 | 305 |
_next_arc = e + 1; |
306 | 306 |
return true; |
307 | 307 |
} |
308 | 308 |
} |
309 | 309 |
return false; |
310 | 310 |
} |
311 | 311 |
|
312 | 312 |
}; //class FirstEligiblePivotRule |
313 | 313 |
|
314 | 314 |
|
315 | 315 |
// Implementation of the Best Eligible pivot rule |
316 | 316 |
class BestEligiblePivotRule |
317 | 317 |
{ |
318 | 318 |
private: |
319 | 319 |
|
320 | 320 |
// References to the NetworkSimplex class |
321 | 321 |
const IntVector &_source; |
322 | 322 |
const IntVector &_target; |
323 | 323 |
const CostVector &_cost; |
324 | 324 |
const IntVector &_state; |
325 | 325 |
const CostVector &_pi; |
326 | 326 |
int &_in_arc; |
327 | 327 |
int _arc_num; |
328 | 328 |
|
329 | 329 |
public: |
330 | 330 |
|
331 | 331 |
// Constructor |
332 | 332 |
BestEligiblePivotRule(NetworkSimplex &ns) : |
333 | 333 |
_source(ns._source), _target(ns._target), |
334 | 334 |
_cost(ns._cost), _state(ns._state), _pi(ns._pi), |
335 | 335 |
_in_arc(ns.in_arc), _arc_num(ns._arc_num) |
336 | 336 |
{} |
337 | 337 |
|
338 | 338 |
// Find next entering arc |
339 | 339 |
bool findEnteringArc() { |
340 | 340 |
Cost c, min = 0; |
341 | 341 |
for (int e = 0; e < _arc_num; ++e) { |
342 | 342 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
343 | 343 |
if (c < min) { |
344 | 344 |
min = c; |
345 | 345 |
_in_arc = e; |
346 | 346 |
} |
347 | 347 |
} |
348 | 348 |
return min < 0; |
349 | 349 |
} |
350 | 350 |
|
351 | 351 |
}; //class BestEligiblePivotRule |
352 | 352 |
|
353 | 353 |
|
354 | 354 |
// Implementation of the Block Search pivot rule |
355 | 355 |
class BlockSearchPivotRule |
356 | 356 |
{ |
357 | 357 |
private: |
358 | 358 |
|
359 | 359 |
// References to the NetworkSimplex class |
360 | 360 |
const IntVector &_source; |
361 | 361 |
const IntVector &_target; |
362 | 362 |
const CostVector &_cost; |
363 | 363 |
const IntVector &_state; |
364 | 364 |
const CostVector &_pi; |
365 | 365 |
int &_in_arc; |
366 | 366 |
int _arc_num; |
367 | 367 |
|
368 | 368 |
// Pivot rule data |
369 | 369 |
int _block_size; |
370 | 370 |
int _next_arc; |
371 | 371 |
|
372 | 372 |
public: |
373 | 373 |
|
374 | 374 |
// Constructor |
375 | 375 |
BlockSearchPivotRule(NetworkSimplex &ns) : |
376 | 376 |
_source(ns._source), _target(ns._target), |
377 | 377 |
_cost(ns._cost), _state(ns._state), _pi(ns._pi), |
378 | 378 |
_in_arc(ns.in_arc), _arc_num(ns._arc_num), _next_arc(0) |
379 | 379 |
{ |
380 | 380 |
// The main parameters of the pivot rule |
381 | 381 |
const double BLOCK_SIZE_FACTOR = 2.0; |
382 | 382 |
const int MIN_BLOCK_SIZE = 10; |
383 | 383 |
|
        _block_size = std::max( int(BLOCK_SIZE_FACTOR *
                                    std::sqrt(double(_arc_num))),
385 | 386 |
MIN_BLOCK_SIZE ); |
386 | 387 |
} |
387 | 388 |
|
388 | 389 |
// Find next entering arc |
389 | 390 |
bool findEnteringArc() { |
390 | 391 |
Cost c, min = 0; |
391 | 392 |
int cnt = _block_size; |
392 | 393 |
int e, min_arc = _next_arc; |
393 | 394 |
for (e = _next_arc; e < _arc_num; ++e) { |
394 | 395 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
395 | 396 |
if (c < min) { |
396 | 397 |
min = c; |
397 | 398 |
min_arc = e; |
398 | 399 |
} |
399 | 400 |
if (--cnt == 0) { |
400 | 401 |
if (min < 0) break; |
401 | 402 |
cnt = _block_size; |
402 | 403 |
} |
403 | 404 |
} |
404 | 405 |
if (min == 0 || cnt > 0) { |
405 | 406 |
for (e = 0; e < _next_arc; ++e) { |
406 | 407 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
407 | 408 |
if (c < min) { |
408 | 409 |
min = c; |
409 | 410 |
min_arc = e; |
410 | 411 |
} |
411 | 412 |
if (--cnt == 0) { |
412 | 413 |
if (min < 0) break; |
413 | 414 |
cnt = _block_size; |
414 | 415 |
} |
415 | 416 |
} |
416 | 417 |
} |
417 | 418 |
if (min >= 0) return false; |
418 | 419 |
_in_arc = min_arc; |
419 | 420 |
_next_arc = e; |
420 | 421 |
return true; |
421 | 422 |
} |
422 | 423 |
|
423 | 424 |
}; //class BlockSearchPivotRule |
424 | 425 |
|
425 | 426 |
|
426 | 427 |
// Implementation of the Candidate List pivot rule |
427 | 428 |
class CandidateListPivotRule |
428 | 429 |
{ |
429 | 430 |
private: |
430 | 431 |
|
431 | 432 |
// References to the NetworkSimplex class |
432 | 433 |
const IntVector &_source; |
433 | 434 |
const IntVector &_target; |
434 | 435 |
const CostVector &_cost; |
435 | 436 |
const IntVector &_state; |
436 | 437 |
const CostVector &_pi; |
437 | 438 |
int &_in_arc; |
438 | 439 |
int _arc_num; |
439 | 440 |
|
440 | 441 |
// Pivot rule data |
441 | 442 |
IntVector _candidates; |
442 | 443 |
int _list_length, _minor_limit; |
443 | 444 |
int _curr_length, _minor_count; |
444 | 445 |
int _next_arc; |
445 | 446 |
|
446 | 447 |
public: |
447 | 448 |
|
448 | 449 |
/// Constructor |
449 | 450 |
CandidateListPivotRule(NetworkSimplex &ns) : |
450 | 451 |
_source(ns._source), _target(ns._target), |
451 | 452 |
_cost(ns._cost), _state(ns._state), _pi(ns._pi), |
452 | 453 |
_in_arc(ns.in_arc), _arc_num(ns._arc_num), _next_arc(0) |
453 | 454 |
{ |
454 | 455 |
// The main parameters of the pivot rule |
455 | 456 |
const double LIST_LENGTH_FACTOR = 1.0; |
456 | 457 |
const int MIN_LIST_LENGTH = 10; |
457 | 458 |
const double MINOR_LIMIT_FACTOR = 0.1; |
458 | 459 |
const int MIN_MINOR_LIMIT = 3; |
459 | 460 |
|
        _list_length = std::max( int(LIST_LENGTH_FACTOR *
                                     std::sqrt(double(_arc_num))),
461 | 463 |
MIN_LIST_LENGTH ); |
462 | 464 |
_minor_limit = std::max( int(MINOR_LIMIT_FACTOR * _list_length), |
463 | 465 |
MIN_MINOR_LIMIT ); |
464 | 466 |
_curr_length = _minor_count = 0; |
465 | 467 |
_candidates.resize(_list_length); |
466 | 468 |
} |
467 | 469 |
|
468 | 470 |
/// Find next entering arc |
469 | 471 |
bool findEnteringArc() { |
470 | 472 |
Cost min, c; |
471 | 473 |
int e, min_arc = _next_arc; |
472 | 474 |
if (_curr_length > 0 && _minor_count < _minor_limit) { |
473 | 475 |
// Minor iteration: select the best eligible arc from the |
474 | 476 |
// current candidate list |
475 | 477 |
++_minor_count; |
476 | 478 |
min = 0; |
477 | 479 |
for (int i = 0; i < _curr_length; ++i) { |
478 | 480 |
e = _candidates[i]; |
479 | 481 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
480 | 482 |
if (c < min) { |
481 | 483 |
min = c; |
482 | 484 |
min_arc = e; |
483 | 485 |
} |
484 | 486 |
if (c >= 0) { |
485 | 487 |
_candidates[i--] = _candidates[--_curr_length]; |
486 | 488 |
} |
487 | 489 |
} |
488 | 490 |
if (min < 0) { |
489 | 491 |
_in_arc = min_arc; |
490 | 492 |
return true; |
491 | 493 |
} |
492 | 494 |
} |
493 | 495 |
|
494 | 496 |
// Major iteration: build a new candidate list |
495 | 497 |
min = 0; |
496 | 498 |
_curr_length = 0; |
497 | 499 |
for (e = _next_arc; e < _arc_num; ++e) { |
498 | 500 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
499 | 501 |
if (c < 0) { |
500 | 502 |
_candidates[_curr_length++] = e; |
501 | 503 |
if (c < min) { |
502 | 504 |
min = c; |
503 | 505 |
min_arc = e; |
504 | 506 |
} |
505 | 507 |
if (_curr_length == _list_length) break; |
506 | 508 |
} |
507 | 509 |
} |
508 | 510 |
if (_curr_length < _list_length) { |
509 | 511 |
for (e = 0; e < _next_arc; ++e) { |
510 | 512 |
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
511 | 513 |
if (c < 0) { |
512 | 514 |
_candidates[_curr_length++] = e; |
513 | 515 |
if (c < min) { |
514 | 516 |
min = c; |
515 | 517 |
min_arc = e; |
516 | 518 |
} |
517 | 519 |
if (_curr_length == _list_length) break; |
518 | 520 |
} |
519 | 521 |
} |
520 | 522 |
} |
521 | 523 |
if (_curr_length == 0) return false; |
522 | 524 |
_minor_count = 1; |
523 | 525 |
_in_arc = min_arc; |
524 | 526 |
_next_arc = e; |
525 | 527 |
return true; |
526 | 528 |
} |
527 | 529 |
|
528 | 530 |
}; //class CandidateListPivotRule |
529 | 531 |
|
530 | 532 |
|
531 | 533 |
// Implementation of the Altering Candidate List pivot rule |
532 | 534 |
class AlteringListPivotRule |
533 | 535 |
{ |
534 | 536 |
private: |
535 | 537 |
|
536 | 538 |
// References to the NetworkSimplex class |
537 | 539 |
const IntVector &_source; |
538 | 540 |
const IntVector &_target; |
539 | 541 |
const CostVector &_cost; |
540 | 542 |
const IntVector &_state; |
541 | 543 |
const CostVector &_pi; |
542 | 544 |
int &_in_arc; |
543 | 545 |
int _arc_num; |
544 | 546 |
|
545 | 547 |
// Pivot rule data |
546 | 548 |
int _block_size, _head_length, _curr_length; |
547 | 549 |
int _next_arc; |
548 | 550 |
IntVector _candidates; |
549 | 551 |
CostVector _cand_cost; |
550 | 552 |
|
551 | 553 |
// Functor class to compare arcs during sort of the candidate list |
552 | 554 |
class SortFunc |
553 | 555 |
{ |
554 | 556 |
private: |
555 | 557 |
const CostVector &_map; |
556 | 558 |
public: |
557 | 559 |
SortFunc(const CostVector &map) : _map(map) {} |
558 | 560 |
bool operator()(int left, int right) { |
559 | 561 |
return _map[left] > _map[right]; |
560 | 562 |
} |
561 | 563 |
}; |
562 | 564 |
|
563 | 565 |
SortFunc _sort_func; |
564 | 566 |
|
565 | 567 |
public: |
566 | 568 |
|
567 | 569 |
// Constructor |
568 | 570 |
AlteringListPivotRule(NetworkSimplex &ns) : |
569 | 571 |
_source(ns._source), _target(ns._target), |
570 | 572 |
_cost(ns._cost), _state(ns._state), _pi(ns._pi), |
571 | 573 |
_in_arc(ns.in_arc), _arc_num(ns._arc_num), |
572 | 574 |
_next_arc(0), _cand_cost(ns._arc_num), _sort_func(_cand_cost) |
573 | 575 |
{ |
574 | 576 |
// The main parameters of the pivot rule |
575 | 577 |
const double BLOCK_SIZE_FACTOR = 1.5; |
576 | 578 |
const int MIN_BLOCK_SIZE = 10; |
577 | 579 |
const double HEAD_LENGTH_FACTOR = 0.1; |
578 | 580 |
const int MIN_HEAD_LENGTH = 3; |
579 | 581 |
|
        _block_size = std::max( int(BLOCK_SIZE_FACTOR *
                                    std::sqrt(double(_arc_num))),
581 | 584 |
MIN_BLOCK_SIZE ); |
582 | 585 |
_head_length = std::max( int(HEAD_LENGTH_FACTOR * _block_size), |
583 | 586 |
MIN_HEAD_LENGTH ); |
584 | 587 |
_candidates.resize(_head_length + _block_size); |
585 | 588 |
_curr_length = 0; |
586 | 589 |
} |
587 | 590 |
|
588 | 591 |
// Find next entering arc |
589 | 592 |
bool findEnteringArc() { |
590 | 593 |
// Check the current candidate list |
591 | 594 |
int e; |
592 | 595 |
for (int i = 0; i < _curr_length; ++i) { |
593 | 596 |
e = _candidates[i]; |
594 | 597 |
_cand_cost[e] = _state[e] * |
595 | 598 |
(_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
596 | 599 |
if (_cand_cost[e] >= 0) { |
597 | 600 |
_candidates[i--] = _candidates[--_curr_length]; |
598 | 601 |
} |
599 | 602 |
} |
600 | 603 |
|
601 | 604 |
// Extend the list |
602 | 605 |
int cnt = _block_size; |
603 | 606 |
int last_arc = 0; |
604 | 607 |
int limit = _head_length; |
605 | 608 |
|
606 | 609 |
for (int e = _next_arc; e < _arc_num; ++e) { |
607 | 610 |
_cand_cost[e] = _state[e] * |
608 | 611 |
(_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
609 | 612 |
if (_cand_cost[e] < 0) { |
610 | 613 |
_candidates[_curr_length++] = e; |
611 | 614 |
last_arc = e; |
612 | 615 |
} |
613 | 616 |
if (--cnt == 0) { |
614 | 617 |
if (_curr_length > limit) break; |
615 | 618 |
limit = 0; |
616 | 619 |
cnt = _block_size; |
617 | 620 |
} |
618 | 621 |
} |
619 | 622 |
if (_curr_length <= limit) { |
620 | 623 |
for (int e = 0; e < _next_arc; ++e) { |
621 | 624 |
_cand_cost[e] = _state[e] * |
622 | 625 |
(_cost[e] + _pi[_source[e]] - _pi[_target[e]]); |
623 | 626 |
if (_cand_cost[e] < 0) { |
624 | 627 |
_candidates[_curr_length++] = e; |
625 | 628 |
last_arc = e; |
626 | 629 |
} |
627 | 630 |
if (--cnt == 0) { |
628 | 631 |
if (_curr_length > limit) break; |
629 | 632 |
limit = 0; |
630 | 633 |
cnt = _block_size; |
631 | 634 |
} |
632 | 635 |
} |
633 | 636 |
} |
634 | 637 |
if (_curr_length == 0) return false; |
635 | 638 |
_next_arc = last_arc + 1; |
636 | 639 |
|
637 | 640 |
// Make heap of the candidate list (approximating a partial sort) |
638 | 641 |
make_heap( _candidates.begin(), _candidates.begin() + _curr_length, |
639 | 642 |
_sort_func ); |
640 | 643 |
|
641 | 644 |
// Pop the first element of the heap |
642 | 645 |
_in_arc = _candidates[0]; |
643 | 646 |
pop_heap( _candidates.begin(), _candidates.begin() + _curr_length, |
644 | 647 |
_sort_func ); |
645 | 648 |
_curr_length = std::min(_head_length, _curr_length - 1); |
646 | 649 |
return true; |
647 | 650 |
} |
648 | 651 |
|
649 | 652 |
}; //class AlteringListPivotRule |
650 | 653 |
|
651 | 654 |
public: |
652 | 655 |
|
653 | 656 |
/// \brief Constructor. |
654 | 657 |
/// |
655 | 658 |
/// The constructor of the class. |
656 | 659 |
/// |
657 | 660 |
/// \param graph The digraph the algorithm runs on. |
658 | 661 |
NetworkSimplex(const GR& graph) : |
659 | 662 |
_graph(graph), |
660 | 663 |
_plower(NULL), _pupper(NULL), _pcost(NULL), |
661 | 664 |
_psupply(NULL), _pstsup(false), _ptype(GEQ), |
662 | 665 |
_flow_map(NULL), _potential_map(NULL), |
663 | 666 |
_local_flow(false), _local_potential(false), |
664 | 667 |
_node_id(graph) |
665 | 668 |
{ |
666 | 669 |
LEMON_ASSERT(std::numeric_limits<Flow>::is_integer && |
667 | 670 |
std::numeric_limits<Flow>::is_signed, |
668 | 671 |
"The flow type of NetworkSimplex must be signed integer"); |
669 | 672 |
LEMON_ASSERT(std::numeric_limits<Cost>::is_integer && |
670 | 673 |
std::numeric_limits<Cost>::is_signed, |
671 | 674 |
"The cost type of NetworkSimplex must be signed integer"); |
672 | 675 |
} |
673 | 676 |
|
674 | 677 |
/// Destructor. |
675 | 678 |
~NetworkSimplex() { |
676 | 679 |
if (_local_flow) delete _flow_map; |
677 | 680 |
if (_local_potential) delete _potential_map; |
678 | 681 |
} |
679 | 682 |
|
680 | 683 |
/// \name Parameters |
681 | 684 |
/// The parameters of the algorithm can be specified using these |
682 | 685 |
/// functions. |
683 | 686 |
|
684 | 687 |
/// @{ |
685 | 688 |
|
686 | 689 |
/// \brief Set the lower bounds on the arcs. |
687 | 690 |
/// |
688 | 691 |
/// This function sets the lower bounds on the arcs. |
689 | 692 |
/// If neither this function nor \ref boundMaps() is used before |
690 | 693 |
/// calling \ref run(), the lower bounds will be set to zero |
691 | 694 |
/// on all arcs. |
692 | 695 |
/// |
693 | 696 |
/// \param map An arc map storing the lower bounds. |
694 | 697 |
/// Its \c Value type must be convertible to the \c Flow type |
695 | 698 |
/// of the algorithm. |
696 | 699 |
/// |
697 | 700 |
/// \return <tt>(*this)</tt> |
698 | 701 |
template <typename LOWER> |
699 | 702 |
NetworkSimplex& lowerMap(const LOWER& map) { |
700 | 703 |
delete _plower; |
701 | 704 |
_plower = new FlowArcMap(_graph); |
702 | 705 |
for (ArcIt a(_graph); a != INVALID; ++a) { |
703 | 706 |
(*_plower)[a] = map[a]; |
704 | 707 |
} |
705 | 708 |
return *this; |
706 | 709 |
} |
707 | 710 |
|
708 | 711 |
/// \brief Set the upper bounds (capacities) on the arcs. |
709 | 712 |
/// |
710 | 713 |
/// This function sets the upper bounds (capacities) on the arcs. |
711 | 714 |
/// If none of the functions \ref upperMap(), \ref capacityMap() |
712 | 715 |
/// and \ref boundMaps() is used before calling \ref run(), |
713 | 716 |
/// the upper bounds (capacities) will be set to |
714 | 717 |
/// \c std::numeric_limits<Flow>::max() on all arcs. |
715 | 718 |
/// |
716 | 719 |
/// \param map An arc map storing the upper bounds. |
717 | 720 |
/// Its \c Value type must be convertible to the \c Flow type |
718 | 721 |
/// of the algorithm. |
719 | 722 |
/// |
720 | 723 |
/// \return <tt>(*this)</tt> |
721 | 724 |
template<typename UPPER> |
722 | 725 |
NetworkSimplex& upperMap(const UPPER& map) { |
723 | 726 |
delete _pupper; |
724 | 727 |
_pupper = new FlowArcMap(_graph); |
725 | 728 |
for (ArcIt a(_graph); a != INVALID; ++a) { |
726 | 729 |
(*_pupper)[a] = map[a]; |
727 | 730 |
} |
728 | 731 |
return *this; |
729 | 732 |
} |
730 | 733 |
|
731 | 734 |
/// \brief Set the upper bounds (capacities) on the arcs. |
732 | 735 |
/// |
733 | 736 |
/// This function sets the upper bounds (capacities) on the arcs. |
734 | 737 |
/// It is just an alias for \ref upperMap(). |
735 | 738 |
/// |
736 | 739 |
/// \return <tt>(*this)</tt> |
737 | 740 |
template<typename CAP> |
738 | 741 |
NetworkSimplex& capacityMap(const CAP& map) { |
739 | 742 |
return upperMap(map); |
740 | 743 |
} |
741 | 744 |
|
742 | 745 |
/// \brief Set the lower and upper bounds on the arcs. |
743 | 746 |
/// |
744 | 747 |
/// This function sets the lower and upper bounds on the arcs. |
745 | 748 |
/// If neither this function nor \ref lowerMap() is used before |
746 | 749 |
/// calling \ref run(), the lower bounds will be set to zero |
747 | 750 |
/// on all arcs. |
748 | 751 |
/// If none of the functions \ref upperMap(), \ref capacityMap() |
749 | 752 |
/// and \ref boundMaps() is used before calling \ref run(), |
750 | 753 |
/// the upper bounds (capacities) will be set to |
751 | 754 |
/// \c std::numeric_limits<Flow>::max() on all arcs. |
752 | 755 |
/// |
753 | 756 |
/// \param lower An arc map storing the lower bounds. |
754 | 757 |
/// \param upper An arc map storing the upper bounds. |
755 | 758 |
/// |
756 | 759 |
/// The \c Value type of the maps must be convertible to the |
757 | 760 |
/// \c Flow type of the algorithm. |
758 | 761 |
/// |
759 | 762 |
/// \note This function is just a shortcut of calling \ref lowerMap() |
760 | 763 |
/// and \ref upperMap() separately. |
761 | 764 |
/// |
762 | 765 |
/// \return <tt>(*this)</tt> |
763 | 766 |
template <typename LOWER, typename UPPER> |
764 | 767 |
NetworkSimplex& boundMaps(const LOWER& lower, const UPPER& upper) { |
765 | 768 |
return lowerMap(lower).upperMap(upper); |
766 | 769 |
} |
767 | 770 |
|
768 | 771 |
/// \brief Set the costs of the arcs. |
769 | 772 |
/// |
770 | 773 |
/// This function sets the costs of the arcs. |
771 | 774 |
/// If it is not used before calling \ref run(), the costs |
772 | 775 |
/// will be set to \c 1 on all arcs. |
773 | 776 |
/// |
774 | 777 |
/// \param map An arc map storing the costs. |
775 | 778 |
/// Its \c Value type must be convertible to the \c Cost type |
776 | 779 |
/// of the algorithm. |
777 | 780 |
/// |
778 | 781 |
/// \return <tt>(*this)</tt> |
779 | 782 |
template<typename COST> |
780 | 783 |
NetworkSimplex& costMap(const COST& map) { |
781 | 784 |
delete _pcost; |
782 | 785 |
_pcost = new CostArcMap(_graph); |
783 | 786 |
for (ArcIt a(_graph); a != INVALID; ++a) { |
784 | 787 |
(*_pcost)[a] = map[a]; |
785 | 788 |
} |
786 | 789 |
return *this; |
787 | 790 |
} |
788 | 791 |
|
789 | 792 |
/// \brief Set the supply values of the nodes. |
790 | 793 |
/// |
791 | 794 |
/// This function sets the supply values of the nodes. |
792 | 795 |
/// If neither this function nor \ref stSupply() is used before |
793 | 796 |
/// calling \ref run(), the supply of each node will be set to zero. |
794 | 797 |
/// (It makes sense only if non-zero lower bounds are given.) |
795 | 798 |
/// |
796 | 799 |
/// \param map A node map storing the supply values. |
797 | 800 |
/// Its \c Value type must be convertible to the \c Flow type |
798 | 801 |
/// of the algorithm. |
799 | 802 |
/// |
800 | 803 |
/// \return <tt>(*this)</tt> |
801 | 804 |
template<typename SUP> |
802 | 805 |
NetworkSimplex& supplyMap(const SUP& map) { |
803 | 806 |
delete _psupply; |
804 | 807 |
_pstsup = false; |
805 | 808 |
_psupply = new FlowNodeMap(_graph); |
806 | 809 |
for (NodeIt n(_graph); n != INVALID; ++n) { |
807 | 810 |
(*_psupply)[n] = map[n]; |
808 | 811 |
} |
809 | 812 |
return *this; |
810 | 813 |
} |
811 | 814 |
|
812 | 815 |
/// \brief Set single source and target nodes and a supply value. |
813 | 816 |
/// |
814 | 817 |
/// This function sets a single source node and a single target node |
815 | 818 |
/// and the required flow value. |
816 | 819 |
/// If neither this function nor \ref supplyMap() is used before |
817 | 820 |
/// calling \ref run(), the supply of each node will be set to zero. |
818 | 821 |
/// (It makes sense only if non-zero lower bounds are given.) |
819 | 822 |
/// |
820 | 823 |
/// \param s The source node. |
821 | 824 |
/// \param t The target node. |
822 | 825 |
/// \param k The required amount of flow from node \c s to node \c t |
823 | 826 |
/// (i.e. the supply of \c s and the demand of \c t). |
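    /// For example (a sketch; \c s, \c t and the value \c 80 are
    /// placeholders chosen by the caller):
    /// \code
    ///   ns.stSupply(s, t, 80).run();
    /// \endcode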
824 | 827 |
/// |
825 | 828 |
/// \return <tt>(*this)</tt> |
826 | 829 |
NetworkSimplex& stSupply(const Node& s, const Node& t, Flow k) { |
827 | 830 |
delete _psupply; |
828 | 831 |
_psupply = NULL; |
829 | 832 |
_pstsup = true; |
830 | 833 |
_psource = s; |
831 | 834 |
_ptarget = t; |
832 | 835 |
_pstflow = k; |
833 | 836 |
return *this; |
834 | 837 |
} |
835 | 838 |
|
836 | 839 |
/// \brief Set the problem type. |
837 | 840 |
/// |
838 | 841 |
/// This function sets the problem type for the algorithm. |
839 | 842 |
/// If it is not used before calling \ref run(), the \ref GEQ problem |
840 | 843 |
/// type will be used. |
841 | 844 |
/// |
842 | 845 |
/// For more information see \ref ProblemType. |
843 | 846 |
/// |
844 | 847 |
/// \return <tt>(*this)</tt> |
845 | 848 |
NetworkSimplex& problemType(ProblemType problem_type) { |
846 | 849 |
_ptype = problem_type; |
847 | 850 |
return *this; |
848 | 851 |
} |
849 | 852 |
|
850 | 853 |
/// \brief Set the flow map. |
851 | 854 |
/// |
852 | 855 |
/// This function sets the flow map. |
853 | 856 |
/// If it is not used before calling \ref run(), an instance will |
854 | 857 |
/// be allocated automatically. The destructor deallocates this |
855 | 858 |
/// automatically allocated map, of course. |
856 | 859 |
/// |
857 | 860 |
/// \return <tt>(*this)</tt> |
858 | 861 |
NetworkSimplex& flowMap(FlowMap& map) { |
859 | 862 |
if (_local_flow) { |
860 | 863 |
delete _flow_map; |
861 | 864 |
_local_flow = false; |
862 | 865 |
} |
863 | 866 |
_flow_map = ↦ |
864 | 867 |
return *this; |
865 | 868 |
} |
866 | 869 |
|
867 | 870 |
/// \brief Set the potential map. |
868 | 871 |
/// |
869 | 872 |
/// This function sets the potential map, which is used for storing |
870 | 873 |
/// the dual solution. |
871 | 874 |
/// If it is not used before calling \ref run(), an instance will |
872 | 875 |
/// be allocated automatically. The destructor deallocates this |
873 | 876 |
/// automatically allocated map, of course. |
874 | 877 |
/// |
875 | 878 |
/// \return <tt>(*this)</tt> |
876 | 879 |
NetworkSimplex& potentialMap(PotentialMap& map) { |
877 | 880 |
if (_local_potential) { |
878 | 881 |
delete _potential_map; |
879 | 882 |
_local_potential = false; |
880 | 883 |
} |
881 | 884 |
_potential_map = ↦ |
882 | 885 |
return *this; |
883 | 886 |
} |
884 | 887 |
|
885 | 888 |
/// @} |
886 | 889 |
|
887 | 890 |
/// \name Execution Control |
888 | 891 |
/// The algorithm can be executed using \ref run(). |
889 | 892 |
|
890 | 893 |
/// @{ |
891 | 894 |
|
892 | 895 |
/// \brief Run the algorithm. |
893 | 896 |
/// |
894 | 897 |
/// This function runs the algorithm. |
895 | 898 |
    /// The parameters can be specified using functions \ref lowerMap(),
896 | 899 |
/// \ref upperMap(), \ref capacityMap(), \ref boundMaps(), |
897 | 900 |
/// \ref costMap(), \ref supplyMap(), \ref stSupply(), |
898 | 901 |
/// \ref problemType(), \ref flowMap() and \ref potentialMap(). |
899 | 902 |
/// For example, |
900 | 903 |
/// \code |
901 | 904 |
/// NetworkSimplex<ListDigraph> ns(graph); |
902 | 905 |
/// ns.boundMaps(lower, upper).costMap(cost) |
903 | 906 |
/// .supplyMap(sup).run(); |
904 | 907 |
/// \endcode |
905 | 908 |
/// |
906 | 909 |
/// This function can be called more than once. All the parameters |
907 | 910 |
/// that have been given are kept for the next call, unless |
908 | 911 |
/// \ref reset() is called, thus only the modified parameters |
909 | 912 |
/// have to be set again. See \ref reset() for examples. |
910 | 913 |
/// |
911 | 914 |
/// \param pivot_rule The pivot rule that will be used during the |
912 | 915 |
/// algorithm. For more information see \ref PivotRule. |
913 | 916 |
/// |
914 | 917 |
/// \return \c true if a feasible flow can be found. |
915 | 918 |
bool run(PivotRule pivot_rule = BLOCK_SEARCH) { |
916 | 919 |
return init() && start(pivot_rule); |
917 | 920 |
} |
918 | 921 |
|
919 | 922 |
/// \brief Reset all the parameters that have been given before. |
920 | 923 |
/// |
921 | 924 |
    /// This function resets all the parameters that have been given
922 | 925 |
/// before using functions \ref lowerMap(), \ref upperMap(), |
923 | 926 |
/// \ref capacityMap(), \ref boundMaps(), \ref costMap(), |
924 | 927 |
/// \ref supplyMap(), \ref stSupply(), \ref problemType(), |
925 | 928 |
/// \ref flowMap() and \ref potentialMap(). |
926 | 929 |
/// |
927 | 930 |
/// It is useful for multiple run() calls. If this function is not |
928 | 931 |
/// used, all the parameters given before are kept for the next |
929 | 932 |
/// \ref run() call. |
930 | 933 |
/// |
931 | 934 |
/// For example, |
932 | 935 |
/// \code |
933 | 936 |
/// NetworkSimplex<ListDigraph> ns(graph); |
934 | 937 |
/// |
935 | 938 |
/// // First run |
936 | 939 |
/// ns.lowerMap(lower).capacityMap(cap).costMap(cost) |
937 | 940 |
/// .supplyMap(sup).run(); |
938 | 941 |
/// |
939 | 942 |
/// // Run again with modified cost map (reset() is not called, |
940 | 943 |
    /// // so only the cost map has to be set again)
941 | 944 |
/// cost[e] += 100; |
942 | 945 |
/// ns.costMap(cost).run(); |
943 | 946 |
/// |
944 | 947 |
/// // Run again from scratch using reset() |
945 | 948 |
/// // (the lower bounds will be set to zero on all arcs) |
946 | 949 |
/// ns.reset(); |
947 | 950 |
/// ns.capacityMap(cap).costMap(cost) |
948 | 951 |
/// .supplyMap(sup).run(); |
949 | 952 |
/// \endcode |
950 | 953 |
/// |
951 | 954 |
/// \return <tt>(*this)</tt> |
952 | 955 |
NetworkSimplex& reset() { |
953 | 956 |
delete _plower; |
954 | 957 |
delete _pupper; |
955 | 958 |
delete _pcost; |
956 | 959 |
delete _psupply; |
957 | 960 |
_plower = NULL; |
958 | 961 |
_pupper = NULL; |
959 | 962 |
_pcost = NULL; |
960 | 963 |
_psupply = NULL; |
961 | 964 |
_pstsup = false; |
962 | 965 |
_ptype = GEQ; |
963 | 966 |
if (_local_flow) delete _flow_map; |
964 | 967 |
if (_local_potential) delete _potential_map; |
965 | 968 |
_flow_map = NULL; |
966 | 969 |
_potential_map = NULL; |
967 | 970 |
_local_flow = false; |
968 | 971 |
_local_potential = false; |
969 | 972 |
|
970 | 973 |
return *this; |
971 | 974 |
} |
972 | 975 |
|
973 | 976 |
/// @} |
974 | 977 |
|
975 | 978 |
/// \name Query Functions |
976 | 979 |
/// The results of the algorithm can be obtained using these |
977 | 980 |
/// functions.\n |
978 | 981 |
/// The \ref run() function must be called before using them. |
979 | 982 |
|
980 | 983 |
/// @{ |
981 | 984 |
|
982 | 985 |
/// \brief Return the total cost of the found flow. |
983 | 986 |
/// |
984 | 987 |
/// This function returns the total cost of the found flow. |
985 | 988 |
/// The complexity of the function is O(e). |
986 | 989 |
/// |
987 | 990 |
/// \note The return type of the function can be specified as a |
988 | 991 |
/// template parameter. For example, |
989 | 992 |
/// \code |
990 | 993 |
/// ns.totalCost<double>(); |
991 | 994 |
/// \endcode |
992 | 995 |
/// It is useful if the total cost cannot be stored in the \c Cost |
993 | 996 |
/// type of the algorithm, which is the default return type of the |
994 | 997 |
/// function. |
995 | 998 |
/// |
996 | 999 |
/// \pre \ref run() must be called before using this function. |
997 | 1000 |
template <typename Num> |
998 | 1001 |
Num totalCost() const { |
999 | 1002 |
Num c = 0; |
1000 | 1003 |
if (_pcost) { |
1001 | 1004 |
for (ArcIt e(_graph); e != INVALID; ++e) |
1002 | 1005 |
c += (*_flow_map)[e] * (*_pcost)[e]; |
1003 | 1006 |
} else { |
1004 | 1007 |
for (ArcIt e(_graph); e != INVALID; ++e) |
1005 | 1008 |
c += (*_flow_map)[e]; |
1006 | 1009 |
} |
1007 | 1010 |
return c; |
1008 | 1011 |
} |
1009 | 1012 |
|
1010 | 1013 |
#ifndef DOXYGEN |
1011 | 1014 |
Cost totalCost() const { |
1012 | 1015 |
return totalCost<Cost>(); |
1013 | 1016 |
} |
1014 | 1017 |
#endif |
1015 | 1018 |
|
1016 | 1019 |
/// \brief Return the flow on the given arc. |
1017 | 1020 |
/// |
1018 | 1021 |
/// This function returns the flow on the given arc. |
1019 | 1022 |
/// |
1020 | 1023 |
/// \pre \ref run() must be called before using this function. |
1021 | 1024 |
Flow flow(const Arc& a) const { |
1022 | 1025 |
return (*_flow_map)[a]; |
1023 | 1026 |
} |
1024 | 1027 |
|
1025 | 1028 |
/// \brief Return a const reference to the flow map. |
1026 | 1029 |
/// |
1027 | 1030 |
/// This function returns a const reference to an arc map storing |
1028 | 1031 |
/// the found flow. |
1029 | 1032 |
/// |
1030 | 1033 |
/// \pre \ref run() must be called before using this function. |
1031 | 1034 |
const FlowMap& flowMap() const { |
1032 | 1035 |
return *_flow_map; |
1033 | 1036 |
} |
1034 | 1037 |
|
1035 | 1038 |
/// \brief Return the potential (dual value) of the given node. |
1036 | 1039 |
/// |
1037 | 1040 |
/// This function returns the potential (dual value) of the |
1038 | 1041 |
/// given node. |
1039 | 1042 |
/// |
1040 | 1043 |
/// \pre \ref run() must be called before using this function. |
1041 | 1044 |
Cost potential(const Node& n) const { |
1042 | 1045 |
return (*_potential_map)[n]; |
1043 | 1046 |
} |
1044 | 1047 |
|
1045 | 1048 |
/// \brief Return a const reference to the potential map |
1046 | 1049 |
/// (the dual solution). |
1047 | 1050 |
/// |
1048 | 1051 |
/// This function returns a const reference to a node map storing |
1049 | 1052 |
/// the found potentials, which form the dual solution of the |
1050 | 1053 |
/// \ref min_cost_flow "minimum cost flow" problem. |
1051 | 1054 |
/// |
1052 | 1055 |
/// \pre \ref run() must be called before using this function. |
1053 | 1056 |
const PotentialMap& potentialMap() const { |
1054 | 1057 |
return *_potential_map; |
1055 | 1058 |
} |
1056 | 1059 |
|
1057 | 1060 |
/// @} |
1058 | 1061 |
|
1059 | 1062 |
private: |
1060 | 1063 |
|
1061 | 1064 |
// Initialize internal data structures |
1062 | 1065 |
bool init() { |
1063 | 1066 |
// Initialize result maps |
1064 | 1067 |
if (!_flow_map) { |
1065 | 1068 |
_flow_map = new FlowMap(_graph); |
1066 | 1069 |
_local_flow = true; |
1067 | 1070 |
} |
1068 | 1071 |
if (!_potential_map) { |
1069 | 1072 |
_potential_map = new PotentialMap(_graph); |
1070 | 1073 |
_local_potential = true; |
1071 | 1074 |
} |
1072 | 1075 |
|
1073 | 1076 |
// Initialize vectors |
1074 | 1077 |
_node_num = countNodes(_graph); |
1075 | 1078 |
_arc_num = countArcs(_graph); |
1076 | 1079 |
int all_node_num = _node_num + 1; |
1077 | 1080 |
int all_arc_num = _arc_num + _node_num; |
1078 | 1081 |
if (_node_num == 0) return false; |
1079 | 1082 |
|
1080 | 1083 |
_arc_ref.resize(_arc_num); |
1081 | 1084 |
_source.resize(all_arc_num); |
1082 | 1085 |
_target.resize(all_arc_num); |
1083 | 1086 |
|
1084 | 1087 |
_cap.resize(all_arc_num); |
1085 | 1088 |
_cost.resize(all_arc_num); |
1086 | 1089 |
_supply.resize(all_node_num); |
1087 | 1090 |
_flow.resize(all_arc_num); |
1088 | 1091 |
_pi.resize(all_node_num); |
1089 | 1092 |
|
1090 | 1093 |
_parent.resize(all_node_num); |
1091 | 1094 |
_pred.resize(all_node_num); |
1092 | 1095 |
_forward.resize(all_node_num); |
1093 | 1096 |
_thread.resize(all_node_num); |
1094 | 1097 |
_rev_thread.resize(all_node_num); |
1095 | 1098 |
_succ_num.resize(all_node_num); |
1096 | 1099 |
_last_succ.resize(all_node_num); |
1097 | 1100 |
_state.resize(all_arc_num); |
1098 | 1101 |
|
1099 | 1102 |
// Initialize node related data |
1100 | 1103 |
bool valid_supply = true; |
1101 | 1104 |
Flow sum_supply = 0; |
1102 | 1105 |
if (!_pstsup && !_psupply) { |
1103 | 1106 |
_pstsup = true; |
1104 | 1107 |
_psource = _ptarget = NodeIt(_graph); |
1105 | 1108 |
_pstflow = 0; |
1106 | 1109 |
} |
1107 | 1110 |
if (_psupply) { |
1108 | 1111 |
int i = 0; |
1109 | 1112 |
for (NodeIt n(_graph); n != INVALID; ++n, ++i) { |
1110 | 1113 |
_node_id[n] = i; |
1111 | 1114 |
_supply[i] = (*_psupply)[n]; |
1112 | 1115 |
sum_supply += _supply[i]; |
1113 | 1116 |
} |
1114 | 1117 |
valid_supply = (_ptype == GEQ && sum_supply <= 0) || |
1115 | 1118 |
(_ptype == LEQ && sum_supply >= 0); |
1116 | 1119 |
} else { |
1117 | 1120 |
int i = 0; |
1118 | 1121 |
for (NodeIt n(_graph); n != INVALID; ++n, ++i) { |
1119 | 1122 |
_node_id[n] = i; |
1120 | 1123 |
_supply[i] = 0; |
1121 | 1124 |
} |
1122 | 1125 |
_supply[_node_id[_psource]] = _pstflow; |
1123 | 1126 |
_supply[_node_id[_ptarget]] = -_pstflow; |
1124 | 1127 |
} |
1125 | 1128 |
if (!valid_supply) return false; |
1126 | 1129 |
|
1127 | 1130 |
// Infinite capacity value |
1128 | 1131 |
Flow inf_cap = |
1129 | 1132 |
std::numeric_limits<Flow>::has_infinity ? |
1130 | 1133 |
std::numeric_limits<Flow>::infinity() : |
1131 | 1134 |
std::numeric_limits<Flow>::max(); |
1132 | 1135 |
|
1133 | 1136 |
      // Initialize artificial cost
1134 | 1137 |
Cost art_cost; |
1135 | 1138 |
if (std::numeric_limits<Cost>::is_exact) { |
1136 | 1139 |
art_cost = std::numeric_limits<Cost>::max() / 4 + 1; |
1137 | 1140 |
} else { |
1138 | 1141 |
art_cost = std::numeric_limits<Cost>::min(); |
1139 | 1142 |
for (int i = 0; i != _arc_num; ++i) { |
1140 | 1143 |
if (_cost[i] > art_cost) art_cost = _cost[i]; |
1141 | 1144 |
} |
1142 | 1145 |
art_cost = (art_cost + 1) * _node_num; |
1143 | 1146 |
} |
1144 | 1147 |
|
1145 | 1148 |
// Run Circulation to check if a feasible solution exists |
1146 | 1149 |
typedef ConstMap<Arc, Flow> ConstArcMap; |
1147 | 1150 |
FlowNodeMap *csup = NULL; |
1148 | 1151 |
bool local_csup = false; |
1149 | 1152 |
if (_psupply) { |
1150 | 1153 |
csup = _psupply; |
1151 | 1154 |
} else { |
1152 | 1155 |
csup = new FlowNodeMap(_graph, 0); |
1153 | 1156 |
(*csup)[_psource] = _pstflow; |
1154 | 1157 |
(*csup)[_ptarget] = -_pstflow; |
1155 | 1158 |
local_csup = true; |
1156 | 1159 |
} |
1157 | 1160 |
bool circ_result = false; |
1158 | 1161 |
if (_ptype == GEQ || (_ptype == LEQ && sum_supply == 0)) { |
1159 | 1162 |
// GEQ problem type |
1160 | 1163 |
if (_plower) { |
1161 | 1164 |
if (_pupper) { |
1162 | 1165 |
Circulation<GR, FlowArcMap, FlowArcMap, FlowNodeMap> |
1163 | 1166 |
circ(_graph, *_plower, *_pupper, *csup); |
1164 | 1167 |
circ_result = circ.run(); |
1165 | 1168 |
} else { |
1166 | 1169 |
Circulation<GR, FlowArcMap, ConstArcMap, FlowNodeMap> |
1167 | 1170 |
circ(_graph, *_plower, ConstArcMap(inf_cap), *csup); |
1168 | 1171 |
circ_result = circ.run(); |
1169 | 1172 |
} |
1170 | 1173 |
} else { |
1171 | 1174 |
if (_pupper) { |
1172 | 1175 |
Circulation<GR, ConstArcMap, FlowArcMap, FlowNodeMap> |
1173 | 1176 |
circ(_graph, ConstArcMap(0), *_pupper, *csup); |
1174 | 1177 |
circ_result = circ.run(); |
1175 | 1178 |
} else { |
1176 | 1179 |
Circulation<GR, ConstArcMap, ConstArcMap, FlowNodeMap> |
1177 | 1180 |
circ(_graph, ConstArcMap(0), ConstArcMap(inf_cap), *csup); |
1178 | 1181 |
circ_result = circ.run(); |
1179 | 1182 |
} |
1180 | 1183 |
} |
1181 | 1184 |
} else { |
1182 | 1185 |
// LEQ problem type |
1183 | 1186 |
typedef ReverseDigraph<const GR> RevGraph; |
1184 | 1187 |
typedef NegMap<FlowNodeMap> NegNodeMap; |
1185 | 1188 |
RevGraph rgraph(_graph); |
1186 | 1189 |
NegNodeMap neg_csup(*csup); |
1187 | 1190 |
if (_plower) { |
1188 | 1191 |
if (_pupper) { |
1189 | 1192 |
Circulation<RevGraph, FlowArcMap, FlowArcMap, NegNodeMap> |
1190 | 1193 |
circ(rgraph, *_plower, *_pupper, neg_csup); |
1191 | 1194 |
circ_result = circ.run(); |
1192 | 1195 |
} else { |
1193 | 1196 |
Circulation<RevGraph, FlowArcMap, ConstArcMap, NegNodeMap> |
1194 | 1197 |
circ(rgraph, *_plower, ConstArcMap(inf_cap), neg_csup); |
1195 | 1198 |
circ_result = circ.run(); |
1196 | 1199 |
} |
1197 | 1200 |
} else { |
1198 | 1201 |
if (_pupper) { |
1199 | 1202 |
Circulation<RevGraph, ConstArcMap, FlowArcMap, NegNodeMap> |
1200 | 1203 |
circ(rgraph, ConstArcMap(0), *_pupper, neg_csup); |
1201 | 1204 |
circ_result = circ.run(); |
1202 | 1205 |
} else { |
1203 | 1206 |
Circulation<RevGraph, ConstArcMap, ConstArcMap, NegNodeMap> |
1204 | 1207 |
circ(rgraph, ConstArcMap(0), ConstArcMap(inf_cap), neg_csup); |
1205 | 1208 |
circ_result = circ.run(); |
1206 | 1209 |
} |
1207 | 1210 |
} |
1208 | 1211 |
} |
1209 | 1212 |
if (local_csup) delete csup; |
1210 | 1213 |
if (!circ_result) return false; |
1211 | 1214 |
|
1212 | 1215 |
// Set data for the artificial root node |
1213 | 1216 |
_root = _node_num; |
1214 | 1217 |
_parent[_root] = -1; |
1215 | 1218 |
_pred[_root] = -1; |
1216 | 1219 |
_thread[_root] = 0; |
1217 | 1220 |
_rev_thread[0] = _root; |
1218 | 1221 |
_succ_num[_root] = all_node_num; |
1219 | 1222 |
_last_succ[_root] = _root - 1; |
1220 | 1223 |
_supply[_root] = -sum_supply; |
1221 | 1224 |
if (sum_supply < 0) { |
1222 | 1225 |
_pi[_root] = -art_cost; |
1223 | 1226 |
} else { |
1224 | 1227 |
_pi[_root] = art_cost; |
1225 | 1228 |
} |
1226 | 1229 |
|
1227 | 1230 |
// Store the arcs in a mixed order |
1228 |
int k = std::max(int(sqrt(_arc_num)), 10); |
|
1231 |
int k = std::max(int(std::sqrt(double(_arc_num))), 10); |
|
1229 | 1232 |
int i = 0; |
1230 | 1233 |
for (ArcIt e(_graph); e != INVALID; ++e) { |
1231 | 1234 |
_arc_ref[i] = e; |
1232 | 1235 |
if ((i += k) >= _arc_num) i = (i % k) + 1; |
1233 | 1236 |
} |
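// For illustration (the code above uses a stride of at least 10): with
// 10 arcs and a stride of k = 3 the positions of _arc_ref would be
// filled in the order 0, 3, 6, 9, 1, 4, 7, 2, 5, 8, so consecutive
// entries come from distant parts of the original arc list. This gives
// pivot rules that scan _arc_ref in blocks a more even sample of the
// graph.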
1234 | 1237 |
|
1235 | 1238 |
// Initialize arc maps |
1236 | 1239 |
if (_pupper && _pcost) { |
1237 | 1240 |
for (int i = 0; i != _arc_num; ++i) { |
1238 | 1241 |
Arc e = _arc_ref[i]; |
1239 | 1242 |
_source[i] = _node_id[_graph.source(e)]; |
1240 | 1243 |
_target[i] = _node_id[_graph.target(e)]; |
1241 | 1244 |
_cap[i] = (*_pupper)[e]; |
1242 | 1245 |
_cost[i] = (*_pcost)[e]; |
1243 | 1246 |
_flow[i] = 0; |
1244 | 1247 |
_state[i] = STATE_LOWER; |
1245 | 1248 |
} |
1246 | 1249 |
} else { |
1247 | 1250 |
for (int i = 0; i != _arc_num; ++i) { |
1248 | 1251 |
Arc e = _arc_ref[i]; |
1249 | 1252 |
_source[i] = _node_id[_graph.source(e)]; |
1250 | 1253 |
_target[i] = _node_id[_graph.target(e)]; |
1251 | 1254 |
_flow[i] = 0; |
1252 | 1255 |
_state[i] = STATE_LOWER; |
1253 | 1256 |
} |
1254 | 1257 |
if (_pupper) { |
1255 | 1258 |
for (int i = 0; i != _arc_num; ++i) |
1256 | 1259 |
_cap[i] = (*_pupper)[_arc_ref[i]]; |
1257 | 1260 |
} else { |
1258 | 1261 |
for (int i = 0; i != _arc_num; ++i) |
1259 | 1262 |
_cap[i] = inf_cap; |
1260 | 1263 |
} |
1261 | 1264 |
if (_pcost) { |
1262 | 1265 |
for (int i = 0; i != _arc_num; ++i) |
1263 | 1266 |
_cost[i] = (*_pcost)[_arc_ref[i]]; |
1264 | 1267 |
} else { |
1265 | 1268 |
for (int i = 0; i != _arc_num; ++i) |
1266 | 1269 |
_cost[i] = 1; |
1267 | 1270 |
} |
1268 | 1271 |
} |
1269 | 1272 |
|
1270 | 1273 |
// Remove non-zero lower bounds |
1271 | 1274 |
if (_plower) { |
1272 | 1275 |
for (int i = 0; i != _arc_num; ++i) { |
1273 | 1276 |
Flow c = (*_plower)[_arc_ref[i]]; |
1274 | 1277 |
if (c != 0) { |
1275 | 1278 |
_cap[i] -= c; |
1276 | 1279 |
_supply[_source[i]] -= c; |
1277 | 1280 |
_supply[_target[i]] += c; |
1278 | 1281 |
} |
1279 | 1282 |
} |
1280 | 1283 |
} |
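// In other words, the loop above performs the standard lower-bound
// elimination: an arc with bounds l <= x <= u is replaced by one with
// bounds 0 <= x' <= u - l (where x = x' + l), and the mandatory l units
// are charged to the node balances instead
// (supply[source] -= l, supply[target] += l). The shift is undone in
// start(), where _flow_map is set to (*_plower)[e] + _flow[i].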
1281 | 1284 |
|
1282 | 1285 |
// Add artificial arcs and initialize the spanning tree data structure |
1283 | 1286 |
for (int u = 0, e = _arc_num; u != _node_num; ++u, ++e) { |
1284 | 1287 |
_thread[u] = u + 1; |
1285 | 1288 |
_rev_thread[u + 1] = u; |
1286 | 1289 |
_succ_num[u] = 1; |
1287 | 1290 |
_last_succ[u] = u; |
1288 | 1291 |
_parent[u] = _root; |
1289 | 1292 |
_pred[u] = e; |
1290 | 1293 |
_cost[e] = art_cost; |
1291 | 1294 |
_cap[e] = inf_cap; |
1292 | 1295 |
_state[e] = STATE_TREE; |
1293 | 1296 |
if (_supply[u] > 0 || (_supply[u] == 0 && sum_supply <= 0)) { |
1294 | 1297 |
_flow[e] = _supply[u]; |
1295 | 1298 |
_forward[u] = true; |
1296 | 1299 |
_pi[u] = -art_cost + _pi[_root]; |
1297 | 1300 |
} else { |
1298 | 1301 |
_flow[e] = -_supply[u]; |
1299 | 1302 |
_forward[u] = false; |
1300 | 1303 |
_pi[u] = art_cost + _pi[_root]; |
1301 | 1304 |
} |
1302 | 1305 |
} |
1303 | 1306 |
|
1304 | 1307 |
return true; |
1305 | 1308 |
} |
1306 | 1309 |
|
1307 | 1310 |
// Find the join node |
1308 | 1311 |
void findJoinNode() { |
1309 | 1312 |
int u = _source[in_arc]; |
1310 | 1313 |
int v = _target[in_arc]; |
1311 | 1314 |
while (u != v) { |
1312 | 1315 |
if (_succ_num[u] < _succ_num[v]) { |
1313 | 1316 |
u = _parent[u]; |
1314 | 1317 |
} else { |
1315 | 1318 |
v = _parent[v]; |
1316 | 1319 |
} |
1317 | 1320 |
} |
1318 | 1321 |
join = u; |
1319 | 1322 |
} |
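// The search above is a lowest common ancestor walk on the spanning
// tree: the endpoint with the smaller subtree (_succ_num) is moved to
// its parent, so the two pointers meet exactly at the apex of the
// cycle closed by the entering arc. A minimal sketch of the same idea
// on plain arrays (illustrative names only, not LEMON API):
//
//   int lowestCommonAncestor(const std::vector<int>& parent,
//                            const std::vector<int>& subtree_size,
//                            int u, int v) {
//     while (u != v) {
//       // Step up from the node whose subtree is smaller.
//       if (subtree_size[u] < subtree_size[v]) u = parent[u];
//       else v = parent[v];
//     }
//     return u;
//   }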
1320 | 1323 |
|
1321 | 1324 |
// Find the leaving arc of the cycle and return true if the
1322 | 1325 |
// leaving arc is not the same as the entering arc |
1323 | 1326 |
bool findLeavingArc() { |
1324 | 1327 |
// Initialize first and second nodes according to the direction |
1325 | 1328 |
// of the cycle |
1326 | 1329 |
if (_state[in_arc] == STATE_LOWER) { |
1327 | 1330 |
first = _source[in_arc]; |
1328 | 1331 |
second = _target[in_arc]; |
1329 | 1332 |
} else { |
1330 | 1333 |
first = _target[in_arc]; |
1331 | 1334 |
second = _source[in_arc]; |
1332 | 1335 |
} |
1333 | 1336 |
delta = _cap[in_arc]; |
1334 | 1337 |
int result = 0; |
1335 | 1338 |
Flow d; |
1336 | 1339 |
int e; |
1337 | 1340 |
|
1338 | 1341 |
// Search the cycle along the path from the first node to the root
1339 | 1342 |
for (int u = first; u != join; u = _parent[u]) { |
1340 | 1343 |
e = _pred[u]; |
1341 | 1344 |
d = _forward[u] ? _flow[e] : _cap[e] - _flow[e]; |
1342 | 1345 |
if (d < delta) { |
1343 | 1346 |
delta = d; |
1344 | 1347 |
u_out = u; |
1345 | 1348 |
result = 1; |
1346 | 1349 |
} |
1347 | 1350 |
} |
1348 | 1351 |
// Search the cycle along the path from the second node to the root
1349 | 1352 |
for (int u = second; u != join; u = _parent[u]) { |
1350 | 1353 |
e = _pred[u]; |
1351 | 1354 |
d = _forward[u] ? _cap[e] - _flow[e] : _flow[e]; |
1352 | 1355 |
if (d <= delta) { |
1353 | 1356 |
delta = d; |
1354 | 1357 |
u_out = u; |
1355 | 1358 |
result = 2; |
1356 | 1359 |
} |
1357 | 1360 |
} |
1358 | 1361 |
|
1359 | 1362 |
if (result == 1) { |
1360 | 1363 |
u_in = first; |
1361 | 1364 |
v_in = second; |
1362 | 1365 |
} else { |
1363 | 1366 |
u_in = second; |
1364 | 1367 |
v_in = first; |
1365 | 1368 |
} |
1366 | 1369 |
return result != 0; |
1367 | 1370 |
} |
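// The two loops above are the ratio test of the simplex iteration:
// delta starts as the capacity of the entering arc and is lowered to
// the smallest residual capacity found along the two tree paths of the
// cycle, and the arc attaining that minimum leaves the basis. For
// example, with _cap[in_arc] = 6 and residuals {4, 7} on the first
// path and {5} on the second, delta becomes 4 and the arc with
// residual 4 is the leaving arc. Note the asymmetry: "<" on the first
// path and "<=" on the second means that, in case of ties, an arc on
// the second path is preferred as the leaving arc.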
1368 | 1371 |
|
1369 | 1372 |
// Change _flow and _state vectors |
1370 | 1373 |
void changeFlow(bool change) { |
1371 | 1374 |
// Augment along the cycle |
1372 | 1375 |
if (delta > 0) { |
1373 | 1376 |
Flow val = _state[in_arc] * delta; |
1374 | 1377 |
_flow[in_arc] += val; |
1375 | 1378 |
for (int u = _source[in_arc]; u != join; u = _parent[u]) { |
1376 | 1379 |
_flow[_pred[u]] += _forward[u] ? -val : val; |
1377 | 1380 |
} |
1378 | 1381 |
for (int u = _target[in_arc]; u != join; u = _parent[u]) { |
1379 | 1382 |
_flow[_pred[u]] += _forward[u] ? val : -val; |
1380 | 1383 |
} |
1381 | 1384 |
} |
1382 | 1385 |
// Update the state of the entering and leaving arcs |
1383 | 1386 |
if (change) { |
1384 | 1387 |
_state[in_arc] = STATE_TREE; |
1385 | 1388 |
_state[_pred[u_out]] = |
1386 | 1389 |
(_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER; |
1387 | 1390 |
} else { |
1388 | 1391 |
_state[in_arc] = -_state[in_arc]; |
1389 | 1392 |
} |
1390 | 1393 |
} |
1391 | 1394 |
|
1392 | 1395 |
// Update the tree structure |
1393 | 1396 |
void updateTreeStructure() { |
1394 | 1397 |
int u, w; |
1395 | 1398 |
int old_rev_thread = _rev_thread[u_out]; |
1396 | 1399 |
int old_succ_num = _succ_num[u_out]; |
1397 | 1400 |
int old_last_succ = _last_succ[u_out]; |
1398 | 1401 |
v_out = _parent[u_out]; |
1399 | 1402 |
|
1400 | 1403 |
u = _last_succ[u_in]; // the last successor of u_in |
1401 | 1404 |
right = _thread[u]; // the node after it |
1402 | 1405 |
|
1403 | 1406 |
// Handle the case when old_rev_thread equals v_in
1404 | 1407 |
// (it also means that join and v_out coincide) |
1405 | 1408 |
if (old_rev_thread == v_in) { |
1406 | 1409 |
last = _thread[_last_succ[u_out]]; |
1407 | 1410 |
} else { |
1408 | 1411 |
last = _thread[v_in]; |
1409 | 1412 |
} |
1410 | 1413 |
|
1411 | 1414 |
// Update _thread and _parent along the stem nodes (i.e. the nodes |
1412 | 1415 |
// between u_in and u_out, whose parents have to be changed)
1413 | 1416 |
_thread[v_in] = stem = u_in; |
1414 | 1417 |
_dirty_revs.clear(); |
1415 | 1418 |
_dirty_revs.push_back(v_in); |
1416 | 1419 |
par_stem = v_in; |
1417 | 1420 |
while (stem != u_out) { |
1418 | 1421 |
// Insert the next stem node into the thread list |
1419 | 1422 |
new_stem = _parent[stem]; |
1420 | 1423 |
_thread[u] = new_stem; |
1421 | 1424 |
_dirty_revs.push_back(u); |
1422 | 1425 |
|
1423 | 1426 |
// Remove the subtree of stem from the thread list |
1424 | 1427 |
w = _rev_thread[stem]; |
1425 | 1428 |
_thread[w] = right; |
1426 | 1429 |
_rev_thread[right] = w; |
1427 | 1430 |
|
1428 | 1431 |
// Change the parent node and shift stem nodes |
1429 | 1432 |
_parent[stem] = par_stem; |
1430 | 1433 |
par_stem = stem; |
1431 | 1434 |
stem = new_stem; |
1432 | 1435 |
|
1433 | 1436 |
// Update u and right |
1434 | 1437 |
u = _last_succ[stem] == _last_succ[par_stem] ? |
1435 | 1438 |
_rev_thread[par_stem] : _last_succ[stem]; |
1436 | 1439 |
right = _thread[u]; |
1437 | 1440 |
} |
1438 | 1441 |
_parent[u_out] = par_stem; |
1439 | 1442 |
_thread[u] = last; |
1440 | 1443 |
_rev_thread[last] = u; |
1441 | 1444 |
_last_succ[u_out] = u; |
1442 | 1445 |
|
1443 | 1446 |
// Remove the subtree of u_out from the thread list except for |
1444 | 1447 |
// the case when old_rev_thread equals v_in
1445 | 1448 |
// (it also means that join and v_out coincide) |
1446 | 1449 |
if (old_rev_thread != v_in) { |
1447 | 1450 |
_thread[old_rev_thread] = right; |
1448 | 1451 |
_rev_thread[right] = old_rev_thread; |
1449 | 1452 |
} |
1450 | 1453 |
|
1451 | 1454 |
// Update _rev_thread using the new _thread values |
1452 | 1455 |
for (int i = 0; i < int(_dirty_revs.size()); ++i) { |
1453 | 1456 |
u = _dirty_revs[i]; |
1454 | 1457 |
_rev_thread[_thread[u]] = u; |
1455 | 1458 |
} |
1456 | 1459 |
|
1457 | 1460 |
// Update _pred, _forward, _last_succ and _succ_num for the |
1458 | 1461 |
// stem nodes from u_out to u_in |
1459 | 1462 |
int tmp_sc = 0, tmp_ls = _last_succ[u_out]; |
1460 | 1463 |
u = u_out; |
1461 | 1464 |
while (u != u_in) { |
1462 | 1465 |
w = _parent[u]; |
1463 | 1466 |
_pred[u] = _pred[w]; |
1464 | 1467 |
_forward[u] = !_forward[w]; |
1465 | 1468 |
tmp_sc += _succ_num[u] - _succ_num[w]; |
1466 | 1469 |
_succ_num[u] = tmp_sc; |
1467 | 1470 |
_last_succ[w] = tmp_ls; |
1468 | 1471 |
u = w; |
1469 | 1472 |
} |
1470 | 1473 |
_pred[u_in] = in_arc; |
1471 | 1474 |
_forward[u_in] = (u_in == _source[in_arc]); |
1472 | 1475 |
_succ_num[u_in] = old_succ_num; |
1473 | 1476 |
|
1474 | 1477 |
// Set limits for updating _last_succ from v_in and v_out
1475 | 1478 |
// towards the root |
1476 | 1479 |
int up_limit_in = -1; |
1477 | 1480 |
int up_limit_out = -1; |
1478 | 1481 |
if (_last_succ[join] == v_in) { |
1479 | 1482 |
up_limit_out = join; |
1480 | 1483 |
} else { |
1481 | 1484 |
up_limit_in = join; |
1482 | 1485 |
} |
1483 | 1486 |
|
1484 | 1487 |
// Update _last_succ from v_in towards the root |
1485 | 1488 |
for (u = v_in; u != up_limit_in && _last_succ[u] == v_in; |
1486 | 1489 |
u = _parent[u]) { |
1487 | 1490 |
_last_succ[u] = _last_succ[u_out]; |
1488 | 1491 |
} |
1489 | 1492 |
// Update _last_succ from v_out towards the root |
1490 | 1493 |
if (join != old_rev_thread && v_in != old_rev_thread) { |
1491 | 1494 |
for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; |
1492 | 1495 |
u = _parent[u]) { |
1493 | 1496 |
_last_succ[u] = old_rev_thread; |
1494 | 1497 |
} |
1495 | 1498 |
} else { |
1496 | 1499 |
for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; |
1497 | 1500 |
u = _parent[u]) { |
1498 | 1501 |
_last_succ[u] = _last_succ[u_out]; |
1499 | 1502 |
} |
1500 | 1503 |
} |
1501 | 1504 |
|
1502 | 1505 |
// Update _succ_num from v_in to join |
1503 | 1506 |
for (u = v_in; u != join; u = _parent[u]) { |
1504 | 1507 |
_succ_num[u] += old_succ_num; |
1505 | 1508 |
} |
1506 | 1509 |
// Update _succ_num from v_out to join |
1507 | 1510 |
for (u = v_out; u != join; u = _parent[u]) { |
1508 | 1511 |
_succ_num[u] -= old_succ_num; |
1509 | 1512 |
} |
1510 | 1513 |
} |
1511 | 1514 |
|
1512 | 1515 |
// Update potentials |
1513 | 1516 |
void updatePotential() { |
1514 | 1517 |
Cost sigma = _forward[u_in] ? |
1515 | 1518 |
_pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] : |
1516 | 1519 |
_pi[v_in] - _pi[u_in] + _cost[_pred[u_in]]; |
1517 | 1520 |
// Update potentials in the subtree, which has been moved |
1518 | 1521 |
int end = _thread[_last_succ[u_in]]; |
1519 | 1522 |
for (int u = u_in; u != end; u = _thread[u]) { |
1520 | 1523 |
_pi[u] += sigma; |
1521 | 1524 |
} |
1522 | 1525 |
} |
1523 | 1526 |
|
1524 | 1527 |
// Execute the algorithm |
1525 | 1528 |
bool start(PivotRule pivot_rule) { |
1526 | 1529 |
// Select the pivot rule implementation |
1527 | 1530 |
switch (pivot_rule) { |
1528 | 1531 |
case FIRST_ELIGIBLE: |
1529 | 1532 |
return start<FirstEligiblePivotRule>(); |
1530 | 1533 |
case BEST_ELIGIBLE: |
1531 | 1534 |
return start<BestEligiblePivotRule>(); |
1532 | 1535 |
case BLOCK_SEARCH: |
1533 | 1536 |
return start<BlockSearchPivotRule>(); |
1534 | 1537 |
case CANDIDATE_LIST: |
1535 | 1538 |
return start<CandidateListPivotRule>(); |
1536 | 1539 |
case ALTERING_LIST: |
1537 | 1540 |
return start<AlteringListPivotRule>(); |
1538 | 1541 |
} |
1539 | 1542 |
return false; |
1540 | 1543 |
} |
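// The switch above turns the runtime PivotRule value into a
// compile-time template argument, so the main loop in the templated
// start() is instantiated separately for each pivot rule and needs no
// virtual dispatch. The same pattern in isolation (illustrative names
// only, not LEMON API):
//
//   enum Strategy { FAST, EXACT };
//
//   struct FastImpl  { bool solve() { /* ... */ return true; } };
//   struct ExactImpl { bool solve() { /* ... */ return true; } };
//
//   template <typename Impl>
//   bool solveWith() { Impl impl; return impl.solve(); }
//
//   bool solve(Strategy s) {
//     switch (s) {
//       case FAST:  return solveWith<FastImpl>();
//       case EXACT: return solveWith<ExactImpl>();
//     }
//     return false;
//   }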
1541 | 1544 |
|
1542 | 1545 |
template <typename PivotRuleImpl> |
1543 | 1546 |
bool start() { |
1544 | 1547 |
PivotRuleImpl pivot(*this); |
1545 | 1548 |
|
1546 | 1549 |
// Execute the Network Simplex algorithm |
1547 | 1550 |
while (pivot.findEnteringArc()) { |
1548 | 1551 |
findJoinNode(); |
1549 | 1552 |
bool change = findLeavingArc(); |
1550 | 1553 |
changeFlow(change); |
1551 | 1554 |
if (change) { |
1552 | 1555 |
updateTreeStructure(); |
1553 | 1556 |
updatePotential(); |
1554 | 1557 |
} |
1555 | 1558 |
} |
1556 | 1559 |
|
1557 | 1560 |
// Copy flow values to _flow_map |
1558 | 1561 |
if (_plower) { |
1559 | 1562 |
for (int i = 0; i != _arc_num; ++i) { |
1560 | 1563 |
Arc e = _arc_ref[i]; |
1561 | 1564 |
_flow_map->set(e, (*_plower)[e] + _flow[i]); |
1562 | 1565 |
} |
1563 | 1566 |
} else { |
1564 | 1567 |
for (int i = 0; i != _arc_num; ++i) { |
1565 | 1568 |
_flow_map->set(_arc_ref[i], _flow[i]); |
1566 | 1569 |
} |
1567 | 1570 |
} |
1568 | 1571 |
// Copy potential values to _potential_map |
1569 | 1572 |
for (NodeIt n(_graph); n != INVALID; ++n) { |
1570 | 1573 |
_potential_map->set(n, _pi[_node_id[n]]); |
1571 | 1574 |
} |
1572 | 1575 |
|
1573 | 1576 |
return true; |
1574 | 1577 |
} |
1575 | 1578 |
|
1576 | 1579 |
}; //class NetworkSimplex |
1577 | 1580 |
|
1578 | 1581 |
///@} |
1579 | 1582 |
|
1580 | 1583 |
} //namespace lemon |
1581 | 1584 |
|
1582 | 1585 |
#endif //LEMON_NETWORK_SIMPLEX_H |
1 | 1 |
/* -*- mode: C++; indent-tabs-mode: nil; -*- |
2 | 2 |
* |
3 | 3 |
* This file is a part of LEMON, a generic C++ optimization library. |
4 | 4 |
* |
5 | 5 |
* Copyright (C) 2003-2009 |
6 | 6 |
* Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport |
7 | 7 |
* (Egervary Research Group on Combinatorial Optimization, EGRES). |
8 | 8 |
* |
9 | 9 |
* Permission to use, modify and distribute this software is granted |
10 | 10 |
* provided that this copyright notice appears in all copies. For |
11 | 11 |
* precise terms see the accompanying LICENSE file. |
12 | 12 |
* |
13 | 13 |
* This software is provided "AS IS" with no warranty of any kind, |
14 | 14 |
* express or implied, and with no claim as to its suitability for any |
15 | 15 |
* purpose. |
16 | 16 |
* |
17 | 17 |
*/ |
18 | 18 |
|
19 | 19 |
/// \ingroup tools |
20 | 20 |
/// \file |
21 | 21 |
/// \brief Special plane graph generator.
22 | 22 |
/// |
23 | 23 |
/// Graph generator application for various types of plane graphs. |
24 | 24 |
/// |
25 | 25 |
/// See |
26 | 26 |
/// \code |
27 | 27 |
/// lgf-gen --help |
28 | 28 |
/// \endcode |
29 | 29 |
/// for more info on the usage. |
30 | 30 |
|
31 | 31 |
#include <algorithm> |
32 | 32 |
#include <set> |
33 | 33 |
#include <ctime> |
34 | 34 |
#include <lemon/list_graph.h> |
35 | 35 |
#include <lemon/random.h> |
36 | 36 |
#include <lemon/dim2.h> |
37 | 37 |
#include <lemon/bfs.h> |
38 | 38 |
#include <lemon/counter.h> |
39 | 39 |
#include <lemon/suurballe.h> |
40 | 40 |
#include <lemon/graph_to_eps.h> |
41 | 41 |
#include <lemon/lgf_writer.h> |
42 | 42 |
#include <lemon/arg_parser.h> |
43 | 43 |
#include <lemon/euler.h> |
44 | 44 |
#include <lemon/math.h> |
45 | 45 |
#include <lemon/kruskal.h> |
46 | 46 |
#include <lemon/time_measure.h> |
47 | 47 |
|
48 | 48 |
using namespace lemon; |
49 | 49 |
|
50 | 50 |
typedef dim2::Point<double> Point; |
51 | 51 |
|
52 | 52 |
GRAPH_TYPEDEFS(ListGraph); |
53 | 53 |
|
54 | 54 |
bool progress=true; |
55 | 55 |
|
56 | 56 |
int N; |
57 | 57 |
// int girth; |
58 | 58 |
|
59 | 59 |
ListGraph g; |
60 | 60 |
|
61 | 61 |
std::vector<Node> nodes; |
62 | 62 |
ListGraph::NodeMap<Point> coords(g); |
63 | 63 |
|
64 | 64 |
|
65 | 65 |
double totalLen(){ |
66 | 66 |
double tlen=0; |
67 | 67 |
for(EdgeIt e(g);e!=INVALID;++e) |
68 |
tlen+=sqrt((coords[g.v(e)]-coords[g.u(e)]).normSquare()); |
|
68 |
tlen+=std::sqrt((coords[g.v(e)]-coords[g.u(e)]).normSquare()); |
|
69 | 69 |
return tlen; |
70 | 70 |
} |
71 | 71 |
|
72 | 72 |
int tsp_impr_num=0; |
73 | 73 |
|
74 | 74 |
const double EPSILON=1e-8; |
75 | 75 |
bool tsp_improve(Node u, Node v) |
76 | 76 |
{ |
77 | 77 |
double luv=std::sqrt((coords[v]-coords[u]).normSquare()); |
78 | 78 |
Node u2=u; |
79 | 79 |
Node v2=v; |
80 | 80 |
do { |
81 | 81 |
Node n; |
82 | 82 |
for(IncEdgeIt e(g,v2);(n=g.runningNode(e))==u2;++e) { } |
83 | 83 |
u2=v2; |
84 | 84 |
v2=n; |
85 | 85 |
if(luv+std::sqrt((coords[v2]-coords[u2]).normSquare())-EPSILON> |
86 | 86 |
std::sqrt((coords[u]-coords[u2]).normSquare())+ |
87 | 87 |
std::sqrt((coords[v]-coords[v2]).normSquare())) |
88 | 88 |
{ |
89 | 89 |
g.erase(findEdge(g,u,v)); |
90 | 90 |
g.erase(findEdge(g,u2,v2)); |
91 | 91 |
g.addEdge(u2,u); |
92 | 92 |
g.addEdge(v,v2); |
93 | 93 |
tsp_impr_num++; |
94 | 94 |
return true; |
95 | 95 |
} |
96 | 96 |
} while(v2!=u); |
97 | 97 |
return false; |
98 | 98 |
} |
99 | 99 |
|
100 | 100 |
bool tsp_improve(Node u) |
101 | 101 |
{ |
102 | 102 |
for(IncEdgeIt e(g,u);e!=INVALID;++e) |
103 | 103 |
if(tsp_improve(u,g.runningNode(e))) return true; |
104 | 104 |
return false; |
105 | 105 |
} |
106 | 106 |
|
107 | 107 |
void tsp_improve() |
108 | 108 |
{ |
109 | 109 |
bool b; |
110 | 110 |
do { |
111 | 111 |
b=false; |
112 | 112 |
for(NodeIt n(g);n!=INVALID;++n) |
113 | 113 |
if(tsp_improve(n)) b=true; |
114 | 114 |
} while(b); |
115 | 115 |
} |
116 | 116 |
|
117 | 117 |
void tsp() |
118 | 118 |
{ |
119 | 119 |
for(int i=0;i<N;i++) g.addEdge(nodes[i],nodes[(i+1)%N]); |
120 | 120 |
tsp_improve(); |
121 | 121 |
} |
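// tsp_improve() above performs 2-opt moves directly on the graph: it
// removes the edges (u,v) and (u2,v2) and reconnects the tour with
// (u2,u) and (v,v2) whenever this shortens it. On a tour stored as a
// plain point sequence the same move is a segment reversal. The
// helpers below are an illustrative, self-contained sketch of that
// formulation (hypothetical names, not used by the tool).

double segLen(const Point& a, const Point& b)
{
  return std::sqrt((b - a).normSquare());
}

void twoOptSketch(std::vector<Point>& tour)
{
  bool improved = true;
  while (improved) {
    improved = false;
    const int n = int(tour.size());
    for (int i = 0; i + 1 < n; ++i)
      for (int j = i + 2; j < n; ++j) {
        // Consider the tour edges (i, i+1) and (j, (j+1)%n); skip the
        // pair that shares node 0.
        if (i == 0 && j == n - 1) continue;
        double before = segLen(tour[i], tour[i + 1]) +
                        segLen(tour[j], tour[(j + 1) % n]);
        double after  = segLen(tour[i], tour[j]) +
                        segLen(tour[i + 1], tour[(j + 1) % n]);
        if (after + EPSILON < before) {
          // Reconnect by reversing the segment between the two edges.
          std::reverse(tour.begin() + i + 1, tour.begin() + j + 1);
          improved = true;
        }
      }
  }
}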
122 | 122 |
|
123 | 123 |
class Line |
124 | 124 |
{ |
125 | 125 |
public: |
126 | 126 |
Point a; |
127 | 127 |
Point b; |
128 | 128 |
Line(Point _a,Point _b) :a(_a),b(_b) {} |
129 | 129 |
Line(Node _a,Node _b) : a(coords[_a]),b(coords[_b]) {} |
130 | 130 |
Line(const Arc &e) : a(coords[g.source(e)]),b(coords[g.target(e)]) {} |
131 | 131 |
Line(const Edge &e) : a(coords[g.u(e)]),b(coords[g.v(e)]) {} |
132 | 132 |
}; |
133 | 133 |
|
134 | 134 |
inline std::ostream& operator<<(std::ostream &os, const Line &l) |
135 | 135 |
{ |
136 | 136 |
os << l.a << "->" << l.b; |
137 | 137 |
return os; |
138 | 138 |
} |
139 | 139 |
|
140 | 140 |
bool cross(Line a, Line b) |
141 | 141 |
{ |
142 | 142 |
Point ao=rot90(a.b-a.a); |
143 | 143 |
Point bo=rot90(b.b-b.a); |
144 | 144 |
return (ao*(b.a-a.a))*(ao*(b.b-a.a))<0 && |
145 | 145 |
(bo*(a.a-b.a))*(bo*(a.b-b.a))<0; |
146 | 146 |
} |
147 | 147 |
|
148 | 148 |
struct Parc |
149 | 149 |
{ |
150 | 150 |
Node a; |
151 | 151 |
Node b; |
152 | 152 |
double len; |
153 | 153 |
}; |
154 | 154 |
|
155 | 155 |
bool pedgeLess(Parc a,Parc b) |
156 | 156 |
{ |
157 | 157 |
return a.len<b.len; |
158 | 158 |
} |
159 | 159 |
|
160 | 160 |
std::vector<Edge> arcs; |
161 | 161 |
|
162 | 162 |
namespace _delaunay_bits { |
163 | 163 |
|
164 | 164 |
struct Part { |
165 | 165 |
int prev, curr, next; |
166 | 166 |
|
167 | 167 |
Part(int p, int c, int n) : prev(p), curr(c), next(n) {} |
168 | 168 |
}; |
169 | 169 |
|
170 | 170 |
inline std::ostream& operator<<(std::ostream& os, const Part& part) { |
171 | 171 |
os << '(' << part.prev << ',' << part.curr << ',' << part.next << ')'; |
172 | 172 |
return os; |
173 | 173 |
} |
174 | 174 |
|
175 | 175 |
inline double circle_point(const Point& p, const Point& q, const Point& r) { |
176 | 176 |
double a = p.x * (q.y - r.y) + q.x * (r.y - p.y) + r.x * (p.y - q.y); |
177 | 177 |
if (a == 0) return std::numeric_limits<double>::quiet_NaN(); |
178 | 178 |
|
179 | 179 |
double d = (p.x * p.x + p.y * p.y) * (q.y - r.y) + |
180 | 180 |
(q.x * q.x + q.y * q.y) * (r.y - p.y) + |
181 | 181 |
(r.x * r.x + r.y * r.y) * (p.y - q.y); |
182 | 182 |
|
183 | 183 |
double e = (p.x * p.x + p.y * p.y) * (q.x - r.x) + |
184 | 184 |
(q.x * q.x + q.y * q.y) * (r.x - p.x) + |
185 | 185 |
(r.x * r.x + r.y * r.y) * (p.x - q.x); |
186 | 186 |
|
187 | 187 |
double f = (p.x * p.x + p.y * p.y) * (q.x * r.y - r.x * q.y) + |
188 | 188 |
(q.x * q.x + q.y * q.y) * (r.x * p.y - p.x * r.y) + |
189 | 189 |
(r.x * r.x + r.y * r.y) * (p.x * q.y - q.x * p.y); |
190 | 190 |
|
191 |
return d / (2 * a) + sqrt((d * d + e * e) / (4 * a * a) + f / a); |
|
191 |
return d / (2 * a) + std::sqrt((d * d + e * e) / (4 * a * a) + f / a); |
|
192 | 192 |
} |
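// circle_point(p, q, r) returns the largest x coordinate of the
// circumscribed circle of p, q and r (center x plus radius), i.e. the
// sweep position at which the corresponding circle event fires.
// Illustrative self-check (hypothetical helper, not used by the tool):
// for p=(0,0), q=(0,2), r=(2,0) the circumcircle has center (1,1) and
// radius sqrt(2), so the expected value is 1 + sqrt(2).
inline bool circle_point_check() {
  double x = circle_point(Point(0, 0), Point(0, 2), Point(2, 0));
  return std::fabs(x - (1.0 + std::sqrt(2.0))) < 1e-9;
}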
193 | 193 |
|
194 | 194 |
inline bool circle_form(const Point& p, const Point& q, const Point& r) { |
195 | 195 |
return rot90(q - p) * (r - q) < 0.0; |
196 | 196 |
} |
197 | 197 |
|
198 | 198 |
inline double intersection(const Point& p, const Point& q, double sx) { |
199 | 199 |
const double epsilon = 1e-8; |
200 | 200 |
|
201 | 201 |
if (p.x == q.x) return (p.y + q.y) / 2.0; |
202 | 202 |
|
203 | 203 |
if (sx < p.x + epsilon) return p.y; |
204 | 204 |
if (sx < q.x + epsilon) return q.y; |
205 | 205 |
|
206 | 206 |
double a = q.x - p.x; |
207 | 207 |
double b = (q.x - sx) * p.y - (p.x - sx) * q.y; |
208 | 208 |
double d = (q.x - sx) * (p.x - sx) * (p - q).normSquare(); |
209 |
return (b - sqrt(d)) / a; |
|
209 |
return (b - std::sqrt(d)) / a; |
|
210 | 210 |
} |
211 | 211 |
|
212 | 212 |
struct YLess { |
213 | 213 |
|
214 | 214 |
|
215 | 215 |
YLess(const std::vector<Point>& points, double& sweep) |
216 | 216 |
: _points(points), _sweep(sweep) {} |
217 | 217 |
|
218 | 218 |
bool operator()(const Part& l, const Part& r) const { |
219 | 219 |
const double epsilon = 1e-8; |
220 | 220 |
|
221 | 221 |
// std::cerr << l << " vs " << r << std::endl; |
222 | 222 |
double lbx = l.prev != -1 ? |
223 | 223 |
intersection(_points[l.prev], _points[l.curr], _sweep) : |
224 | 224 |
- std::numeric_limits<double>::infinity(); |
225 | 225 |
double rbx = r.prev != -1 ? |
226 | 226 |
intersection(_points[r.prev], _points[r.curr], _sweep) : |
227 | 227 |
- std::numeric_limits<double>::infinity(); |
228 | 228 |
double lex = l.next != -1 ? |
229 | 229 |
intersection(_points[l.curr], _points[l.next], _sweep) : |
230 | 230 |
std::numeric_limits<double>::infinity(); |
231 | 231 |
double rex = r.next != -1 ? |
232 | 232 |
intersection(_points[r.curr], _points[r.next], _sweep) : |
233 | 233 |
std::numeric_limits<double>::infinity(); |
234 | 234 |
|
235 | 235 |
if (lbx > lex) std::swap(lbx, lex); |
236 | 236 |
if (rbx > rex) std::swap(rbx, rex); |
237 | 237 |
|
238 | 238 |
if (lex < epsilon + rex && lbx + epsilon < rex) return true; |
239 | 239 |
if (rex < epsilon + lex && rbx + epsilon < lex) return false; |
240 | 240 |
return lex < rex; |
241 | 241 |
} |
242 | 242 |
|
243 | 243 |
const std::vector<Point>& _points; |
244 | 244 |
double& _sweep; |
245 | 245 |
}; |
246 | 246 |
|
247 | 247 |
struct BeachIt; |
248 | 248 |
|
249 | 249 |
typedef std::multimap<double, BeachIt> SpikeHeap; |
250 | 250 |
|
251 | 251 |
typedef std::multimap<Part, SpikeHeap::iterator, YLess> Beach; |
252 | 252 |
|
253 | 253 |
struct BeachIt { |
254 | 254 |
Beach::iterator it; |
255 | 255 |
|
256 | 256 |
BeachIt(Beach::iterator iter) : it(iter) {} |
257 | 257 |
}; |
258 | 258 |
|
259 | 259 |
} |
260 | 260 |
|
261 | 261 |
inline void delaunay() { |
262 | 262 |
Counter cnt("Number of arcs added: "); |
263 | 263 |
|
264 | 264 |
using namespace _delaunay_bits; |
265 | 265 |
|
266 | 266 |
typedef _delaunay_bits::Part Part; |
267 | 267 |
typedef std::vector<std::pair<double, int> > SiteHeap; |
268 | 268 |
|
269 | 269 |
|
270 | 270 |
std::vector<Point> points; |
271 | 271 |
std::vector<Node> nodes; |
272 | 272 |
|
273 | 273 |
for (NodeIt it(g); it != INVALID; ++it) { |
274 | 274 |
nodes.push_back(it); |
275 | 275 |
points.push_back(coords[it]); |
276 | 276 |
} |
277 | 277 |
|
278 | 278 |
SiteHeap siteheap(points.size()); |
279 | 279 |
|
280 | 280 |
double sweep; |
281 | 281 |
|
282 | 282 |
|
283 | 283 |
for (int i = 0; i < int(siteheap.size()); ++i) { |
284 | 284 |
siteheap[i] = std::make_pair(points[i].x, i); |
285 | 285 |
} |
286 | 286 |
|
287 | 287 |
std::sort(siteheap.begin(), siteheap.end()); |
288 | 288 |
sweep = siteheap.front().first; |
289 | 289 |
|
290 | 290 |
YLess yless(points, sweep); |
291 | 291 |
Beach beach(yless); |
292 | 292 |
|
293 | 293 |
SpikeHeap spikeheap; |
294 | 294 |
|
295 | 295 |
std::set<std::pair<int, int> > arcs; |
296 | 296 |
|
297 | 297 |
int siteindex = 0; |
298 | 298 |
{ |
299 | 299 |
SiteHeap front; |
300 | 300 |
|
301 | 301 |
while (siteindex < int(siteheap.size()) && |
302 | 302 |
siteheap[0].first == siteheap[siteindex].first) { |
303 | 303 |
front.push_back(std::make_pair(points[siteheap[siteindex].second].y, |
304 | 304 |
siteheap[siteindex].second)); |
305 | 305 |
++siteindex; |
306 | 306 |
} |
307 | 307 |
|
308 | 308 |
std::sort(front.begin(), front.end()); |
309 | 309 |
|
310 | 310 |
for (int i = 0; i < int(front.size()); ++i) { |
311 | 311 |
int prev = (i == 0 ? -1 : front[i - 1].second); |
312 | 312 |
int curr = front[i].second; |
313 | 313 |
int next = (i + 1 == int(front.size()) ? -1 : front[i + 1].second); |
314 | 314 |
|
315 | 315 |
beach.insert(std::make_pair(Part(prev, curr, next), |
316 | 316 |
spikeheap.end())); |
317 | 317 |
} |
318 | 318 |
} |
319 | 319 |
|
320 | 320 |
while (siteindex < int(points.size()) || !spikeheap.empty()) { |
321 | 321 |
|
322 | 322 |
SpikeHeap::iterator spit = spikeheap.begin(); |
323 | 323 |
|
324 | 324 |
if (siteindex < int(points.size()) && |
325 | 325 |
(spit == spikeheap.end() || siteheap[siteindex].first < spit->first)) { |
326 | 326 |
int site = siteheap[siteindex].second; |
327 | 327 |
sweep = siteheap[siteindex].first; |
328 | 328 |
|
329 | 329 |
Beach::iterator bit = beach.upper_bound(Part(site, site, site)); |
330 | 330 |
|
331 | 331 |
if (bit->second != spikeheap.end()) { |
332 | 332 |
spikeheap.erase(bit->second); |
333 | 333 |
} |
334 | 334 |
|
335 | 335 |
int prev = bit->first.prev; |
336 | 336 |
int curr = bit->first.curr; |
337 | 337 |
int next = bit->first.next; |
338 | 338 |
|
339 | 339 |
beach.erase(bit); |
340 | 340 |
|
341 | 341 |
SpikeHeap::iterator pit = spikeheap.end(); |
342 | 342 |
if (prev != -1 && |
343 | 343 |
circle_form(points[prev], points[curr], points[site])) { |
344 | 344 |
double x = circle_point(points[prev], points[curr], points[site]); |
345 | 345 |
pit = spikeheap.insert(std::make_pair(x, BeachIt(beach.end()))); |
346 | 346 |
pit->second.it = |
347 | 347 |
beach.insert(std::make_pair(Part(prev, curr, site), pit)); |
348 | 348 |
} else { |
349 | 349 |
beach.insert(std::make_pair(Part(prev, curr, site), pit)); |
350 | 350 |
} |
351 | 351 |
|
352 | 352 |
beach.insert(std::make_pair(Part(curr, site, curr), spikeheap.end())); |
353 | 353 |
|
354 | 354 |
SpikeHeap::iterator nit = spikeheap.end(); |
355 | 355 |
if (next != -1 && |
356 | 356 |
circle_form(points[site], points[curr],points[next])) { |
357 | 357 |
double x = circle_point(points[site], points[curr], points[next]); |
358 | 358 |
nit = spikeheap.insert(std::make_pair(x, BeachIt(beach.end()))); |
359 | 359 |
nit->second.it = |
360 | 360 |
beach.insert(std::make_pair(Part(site, curr, next), nit)); |
361 | 361 |
} else { |
362 | 362 |
beach.insert(std::make_pair(Part(site, curr, next), nit)); |
363 | 363 |
} |
364 | 364 |
|
365 | 365 |
++siteindex; |
366 | 366 |
} else { |
367 | 367 |
sweep = spit->first; |
368 | 368 |
|
369 | 369 |
Beach::iterator bit = spit->second.it; |
370 | 370 |
|
371 | 371 |
int prev = bit->first.prev; |
372 | 372 |
int curr = bit->first.curr; |
373 | 373 |
int next = bit->first.next; |
374 | 374 |
|
375 | 375 |
{ |
376 | 376 |
std::pair<int, int> arc; |
377 | 377 |
|
378 | 378 |
arc = prev < curr ? |
379 | 379 |
std::make_pair(prev, curr) : std::make_pair(curr, prev); |
380 | 380 |
|
381 | 381 |
if (arcs.find(arc) == arcs.end()) { |
382 | 382 |
arcs.insert(arc); |
383 | 383 |
g.addEdge(nodes[prev], nodes[curr]); |
384 | 384 |
++cnt; |
385 | 385 |
} |
386 | 386 |
|
387 | 387 |
arc = curr < next ? |
388 | 388 |
std::make_pair(curr, next) : std::make_pair(next, curr); |
389 | 389 |
|
390 | 390 |
if (arcs.find(arc) == arcs.end()) { |
391 | 391 |
arcs.insert(arc); |
392 | 392 |
g.addEdge(nodes[curr], nodes[next]); |
393 | 393 |
++cnt; |
394 | 394 |
} |
395 | 395 |
} |
396 | 396 |
|
397 | 397 |
Beach::iterator pbit = bit; --pbit; |
398 | 398 |
int ppv = pbit->first.prev; |
399 | 399 |
Beach::iterator nbit = bit; ++nbit; |
400 | 400 |
int nnt = nbit->first.next; |
401 | 401 |
|
402 | 402 |
if (bit->second != spikeheap.end()) spikeheap.erase(bit->second); |
403 | 403 |
if (pbit->second != spikeheap.end()) spikeheap.erase(pbit->second); |
404 | 404 |
if (nbit->second != spikeheap.end()) spikeheap.erase(nbit->second); |
405 | 405 |
|
406 | 406 |
beach.erase(nbit); |
407 | 407 |
beach.erase(bit); |
408 | 408 |
beach.erase(pbit); |
409 | 409 |
|
410 | 410 |
SpikeHeap::iterator pit = spikeheap.end(); |
411 | 411 |
if (ppv != -1 && ppv != next && |
412 | 412 |
circle_form(points[ppv], points[prev], points[next])) { |
413 | 413 |
double x = circle_point(points[ppv], points[prev], points[next]); |
414 | 414 |
if (x < sweep) x = sweep; |
415 | 415 |
pit = spikeheap.insert(std::make_pair(x, BeachIt(beach.end()))); |
416 | 416 |
pit->second.it = |
417 | 417 |
beach.insert(std::make_pair(Part(ppv, prev, next), pit)); |
418 | 418 |
} else { |
419 | 419 |
beach.insert(std::make_pair(Part(ppv, prev, next), pit)); |
420 | 420 |
} |
421 | 421 |
|
422 | 422 |
SpikeHeap::iterator nit = spikeheap.end(); |
423 | 423 |
if (nnt != -1 && prev != nnt && |
424 | 424 |
circle_form(points[prev], points[next], points[nnt])) { |
425 | 425 |
double x = circle_point(points[prev], points[next], points[nnt]); |
426 | 426 |
if (x < sweep) x = sweep; |
427 | 427 |
nit = spikeheap.insert(std::make_pair(x, BeachIt(beach.end()))); |
428 | 428 |
nit->second.it = |
429 | 429 |
beach.insert(std::make_pair(Part(prev, next, nnt), nit)); |
430 | 430 |
} else { |
431 | 431 |
beach.insert(std::make_pair(Part(prev, next, nnt), nit)); |
432 | 432 |
} |
433 | 433 |
|
434 | 434 |
} |
435 | 435 |
} |
436 | 436 |
|
437 | 437 |
for (Beach::iterator it = beach.begin(); it != beach.end(); ++it) { |
438 | 438 |
int curr = it->first.curr; |
439 | 439 |
int next = it->first.next; |
440 | 440 |
|
441 | 441 |
if (next == -1) continue; |
442 | 442 |
|
443 | 443 |
std::pair<int, int> arc; |
444 | 444 |
|
445 | 445 |
arc = curr < next ? |
446 | 446 |
std::make_pair(curr, next) : std::make_pair(next, curr); |
447 | 447 |
|
448 | 448 |
if (arcs.find(arc) == arcs.end()) { |
449 | 449 |
arcs.insert(arc); |
450 | 450 |
g.addEdge(nodes[curr], nodes[next]); |
451 | 451 |
++cnt; |
452 | 452 |
} |
453 | 453 |
} |
454 | 454 |
} |
455 | 455 |
|
456 | 456 |
void sparse(int d) |
457 | 457 |
{ |
458 | 458 |
Counter cnt("Number of arcs removed: "); |
459 | 459 |
Bfs<ListGraph> bfs(g); |
460 | 460 |
for(std::vector<Edge>::reverse_iterator ei=arcs.rbegin(); |
461 | 461 |
ei!=arcs.rend();++ei) |
462 | 462 |
{ |
463 | 463 |
Node a=g.u(*ei); |
464 | 464 |
Node b=g.v(*ei); |
465 | 465 |
g.erase(*ei); |
466 | 466 |
bfs.run(a,b); |
467 | 467 |
if(bfs.predArc(b)==INVALID || bfs.dist(b)>d) |
468 | 468 |
g.addEdge(a,b); |
469 | 469 |
else cnt++; |
470 | 470 |
} |
471 | 471 |
} |
472 | 472 |
|
473 | 473 |
void sparse2(int d) |
474 | 474 |
{ |
475 | 475 |
Counter cnt("Number of arcs removed: "); |
476 | 476 |
for(std::vector<Edge>::reverse_iterator ei=arcs.rbegin(); |
477 | 477 |
ei!=arcs.rend();++ei) |
478 | 478 |
{ |
479 | 479 |
Node a=g.u(*ei); |
480 | 480 |
Node b=g.v(*ei); |
481 | 481 |
g.erase(*ei); |
482 | 482 |
ConstMap<Arc,int> cegy(1); |
483 | 483 |
Suurballe<ListGraph,ConstMap<Arc,int> > sur(g,cegy,a,b); |
484 | 484 |
int k=sur.run(2); |
485 | 485 |
if(k<2 || sur.totalLength()>d) |
486 | 486 |
g.addEdge(a,b); |
487 | 487 |
else cnt++; |
488 | 488 |
// else std::cout << "Remove arc " << g.id(a) << "-" << g.id(b) << '\n'; |
489 | 489 |
} |
490 | 490 |
} |
491 | 491 |
|
492 | 492 |
void sparseTriangle(int d) |
493 | 493 |
{ |
494 | 494 |
Counter cnt("Number of arcs added: "); |
495 | 495 |
std::vector<Parc> pedges; |
496 | 496 |
for(NodeIt n(g);n!=INVALID;++n) |
497 | 497 |
for(NodeIt m=++(NodeIt(n));m!=INVALID;++m) |
498 | 498 |
{ |
499 | 499 |
Parc p; |
500 | 500 |
p.a=n; |
501 | 501 |
p.b=m; |
502 | 502 |
p.len=(coords[m]-coords[n]).normSquare(); |
503 | 503 |
pedges.push_back(p); |
504 | 504 |
} |
505 | 505 |
std::sort(pedges.begin(),pedges.end(),pedgeLess); |
506 | 506 |
for(std::vector<Parc>::iterator pi=pedges.begin();pi!=pedges.end();++pi) |
507 | 507 |
{ |
508 | 508 |
Line li(pi->a,pi->b); |
509 | 509 |
EdgeIt e(g); |
510 | 510 |
for(;e!=INVALID && !cross(e,li);++e) ; |
511 | 511 |
Edge ne; |
512 | 512 |
if(e==INVALID) { |
513 | 513 |
ConstMap<Arc,int> cegy(1); |
514 | 514 |
Suurballe<ListGraph,ConstMap<Arc,int> > |
515 | 515 |
sur(g,cegy,pi->a,pi->b); |
516 | 516 |
int k=sur.run(2); |
517 | 517 |
if(k<2 || sur.totalLength()>d) |
518 | 518 |
{ |
519 | 519 |
ne=g.addEdge(pi->a,pi->b); |
520 | 520 |
arcs.push_back(ne); |
521 | 521 |
cnt++; |
522 | 522 |
} |
523 | 523 |
} |
524 | 524 |
} |
525 | 525 |
} |
526 | 526 |
|
527 | 527 |
template <typename Graph, typename CoordMap> |
528 | 528 |
class LengthSquareMap { |
529 | 529 |
public: |
530 | 530 |
typedef typename Graph::Edge Key; |
531 | 531 |
typedef typename CoordMap::Value::Value Value; |
532 | 532 |
|
533 | 533 |
LengthSquareMap(const Graph& graph, const CoordMap& coords) |
534 | 534 |
: _graph(graph), _coords(coords) {} |
535 | 535 |
|
536 | 536 |
Value operator[](const Key& key) const { |
537 | 537 |
return (_coords[_graph.v(key)] - |
538 | 538 |
_coords[_graph.u(key)]).normSquare(); |
539 | 539 |
} |
540 | 540 |
|
541 | 541 |
private: |
542 | 542 |
|
543 | 543 |
const Graph& _graph; |
544 | 544 |
const CoordMap& _coords; |
545 | 545 |
}; |
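// LengthSquareMap models LEMON's read-only map concept (nested Key and
// Value types plus operator[]), so it can be passed directly to
// kruskal() below as the edge cost map. Using the squared length is
// sufficient here because x -> x*x is monotone for nonnegative
// lengths, hence the edge ordering, and therefore the resulting
// spanning tree (up to ties between equal lengths), is the same as
// with true Euclidean lengths. A variant returning the true length
// would look like this (illustrative sketch, hypothetical name, not
// used by the tool):

template <typename Graph, typename CoordMap>
class EuclideanLengthMap {
public:
  typedef typename Graph::Edge Key;
  typedef typename CoordMap::Value::Value Value;

  EuclideanLengthMap(const Graph& graph, const CoordMap& coords)
    : _graph(graph), _coords(coords) {}

  Value operator[](const Key& key) const {
    return std::sqrt((_coords[_graph.v(key)] -
                      _coords[_graph.u(key)]).normSquare());
  }

private:
  const Graph& _graph;
  const CoordMap& _coords;
};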
546 | 546 |
|
547 | 547 |
void minTree() { |
548 | 548 |
std::vector<Parc> pedges; |
549 | 549 |
Timer T; |
550 | 550 |
std::cout << T.realTime() << "s: Creating delaunay triangulation...\n"; |
551 | 551 |
delaunay(); |
552 | 552 |
std::cout << T.realTime() << "s: Calculating spanning tree...\n"; |
553 | 553 |
LengthSquareMap<ListGraph, ListGraph::NodeMap<Point> > ls(g, coords); |
554 | 554 |
ListGraph::EdgeMap<bool> tree(g); |
555 | 555 |
kruskal(g, ls, tree); |
556 | 556 |
std::cout << T.realTime() << "s: Removing non tree arcs...\n"; |
557 | 557 |
std::vector<Edge> remove; |
558 | 558 |
for (EdgeIt e(g); e != INVALID; ++e) { |
559 | 559 |
if (!tree[e]) remove.push_back(e); |
560 | 560 |
} |
561 | 561 |
for(int i = 0; i < int(remove.size()); ++i) { |
562 | 562 |
g.erase(remove[i]); |
563 | 563 |
} |
564 | 564 |
std::cout << T.realTime() << "s: Done\n"; |
565 | 565 |
} |
566 | 566 |
|
567 | 567 |
void tsp2() |
568 | 568 |
{ |
569 | 569 |
std::cout << "Find a tree..." << std::endl; |
570 | 570 |
|
571 | 571 |
minTree(); |
572 | 572 |
|
573 | 573 |
std::cout << "Total arc length (tree) : " << totalLen() << std::endl; |
574 | 574 |
|
575 | 575 |
std::cout << "Make it Euler..." << std::endl; |
576 | 576 |
|
577 | 577 |
{ |
578 | 578 |
std::vector<Node> leafs; |
579 | 579 |
for(NodeIt n(g);n!=INVALID;++n) |
580 | 580 |
if(countIncEdges(g,n)%2==1) leafs.push_back(n); |
581 | 581 |
|
582 | 582 |
// for(unsigned int i=0;i<leafs.size();i+=2) |
583 | 583 |
// g.addArc(leafs[i],leafs[i+1]); |
584 | 584 |
|
585 | 585 |
std::vector<Parc> pedges; |
586 | 586 |
for(unsigned int i=0;i+1<leafs.size();i++)
587 | 587 |
for(unsigned int j=i+1;j<leafs.size();j++) |
588 | 588 |
{ |
589 | 589 |
Node n=leafs[i]; |
590 | 590 |
Node m=leafs[j]; |
591 | 591 |
Parc p; |
592 | 592 |
p.a=n; |
593 | 593 |
p.b=m; |
594 | 594 |
p.len=(coords[m]-coords[n]).normSquare(); |
595 | 595 |
pedges.push_back(p); |
596 | 596 |
} |
597 | 597 |
std::sort(pedges.begin(),pedges.end(),pedgeLess); |
598 | 598 |
for(unsigned int i=0;i<pedges.size();i++) |
599 | 599 |
if(countIncEdges(g,pedges[i].a)%2 && |
600 | 600 |
countIncEdges(g,pedges[i].b)%2) |
601 | 601 |
g.addEdge(pedges[i].a,pedges[i].b); |
602 | 602 |
} |
603 | 603 |
|
604 | 604 |
for(NodeIt n(g);n!=INVALID;++n) |
605 | 605 |
if(countIncEdges(g,n)%2 || countIncEdges(g,n)==0 ) |
606 | 606 |
std::cout << "GEBASZ!!!" << std::endl; |
607 | 607 |
|
608 | 608 |
for(EdgeIt e(g);e!=INVALID;++e) |
609 | 609 |
if(g.u(e)==g.v(e)) |
610 | 610 |
std::cout << "LOOP GEBASZ!!!" << std::endl; |
611 | 611 |
|
612 | 612 |
std::cout << "Number of arcs : " << countEdges(g) << std::endl; |
613 | 613 |
|
614 | 614 |
std::cout << "Total arc length (euler) : " << totalLen() << std::endl; |
615 | 615 |
|
616 | 616 |
ListGraph::EdgeMap<Arc> enext(g); |
617 | 617 |
{ |
618 | 618 |
EulerIt<ListGraph> e(g); |
619 | 619 |
Arc eo=e; |
620 | 620 |
Arc ef=e; |
621 | 621 |
// std::cout << "Tour arc: " << g.id(Edge(e)) << std::endl; |
622 | 622 |
for(++e;e!=INVALID;++e) |
623 | 623 |
{ |
624 | 624 |
// std::cout << "Tour arc: " << g.id(Edge(e)) << std::endl; |
625 | 625 |
enext[eo]=e; |
626 | 626 |
eo=e; |
627 | 627 |
} |
628 | 628 |
enext[eo]=ef; |
629 | 629 |
} |
630 | 630 |
|
631 | 631 |
std::cout << "Creating a tour from that..." << std::endl; |
632 | 632 |
|
633 | 633 |
int nnum = countNodes(g); |
634 | 634 |
int ednum = countEdges(g); |
635 | 635 |
|
636 | 636 |
for(Arc p=enext[EdgeIt(g)];ednum>nnum;p=enext[p]) |
637 | 637 |
{ |
638 | 638 |
// std::cout << "Checking arc " << g.id(p) << std::endl; |
639 | 639 |
Arc e=enext[p]; |
640 | 640 |
Arc f=enext[e]; |
641 | 641 |
Node n2=g.source(f); |
642 | 642 |
Node n1=g.oppositeNode(n2,e); |
643 | 643 |
Node n3=g.oppositeNode(n2,f); |
644 | 644 |
if(countIncEdges(g,n2)>2) |
645 | 645 |
{ |
646 | 646 |
// std::cout << "Remove an Arc" << std::endl; |
647 | 647 |
Arc ff=enext[f]; |
648 | 648 |
g.erase(e); |
649 | 649 |
g.erase(f); |
650 | 650 |
if(n1!=n3) |
651 | 651 |
{ |
652 | 652 |
Arc ne=g.direct(g.addEdge(n1,n3),n1); |
653 | 653 |
enext[p]=ne; |
654 | 654 |
enext[ne]=ff; |
655 | 655 |
ednum--; |
656 | 656 |
} |
657 | 657 |
else { |
658 | 658 |
enext[p]=ff; |
659 | 659 |
ednum-=2; |
660 | 660 |
} |
661 | 661 |
} |
662 | 662 |
} |
663 | 663 |
|
664 | 664 |
std::cout << "Total arc length (tour) : " << totalLen() << std::endl; |
665 | 665 |
|
666 | 666 |
std::cout << "2-opt the tour..." << std::endl; |
667 | 667 |
|
668 | 668 |
tsp_improve(); |
669 | 669 |
|
670 | 670 |
std::cout << "Total arc length (2-opt tour) : " << totalLen() << std::endl; |
671 | 671 |
} |
672 | 672 |
|
673 | 673 |
|
674 | 674 |
int main(int argc,const char **argv) |
675 | 675 |
{ |
676 | 676 |
ArgParser ap(argc,argv); |
677 | 677 |
|
678 | 678 |
// bool eps; |
679 | 679 |
bool disc_d, square_d, gauss_d; |
680 | 680 |
// bool tsp_a,two_a,tree_a; |
681 | 681 |
int num_of_cities=1; |
682 | 682 |
double area=1; |
683 | 683 |
N=100; |
684 | 684 |
// girth=10; |
685 | 685 |
std::string ndist("disc"); |
686 | 686 |
ap.refOption("n", "Number of nodes (default is 100)", N) |
687 | 687 |
.intOption("g", "Girth parameter (default is 10)", 10) |
688 | 688 |
.refOption("cities", "Number of cities (default is 1)", num_of_cities) |
689 | 689 |
.refOption("area", "Full relative area of the cities (default is 1)", area) |
690 | 690 |
.refOption("disc", "Nodes are evenly distributed on a unit disc (default)",disc_d) |
691 | 691 |
.optionGroup("dist", "disc") |
692 | 692 |
.refOption("square", "Nodes are evenly distributed on a unit square", square_d) |
693 | 693 |
.optionGroup("dist", "square") |
694 | 694 |
.refOption("gauss", |
695 | 695 |
"Nodes are located according to a two-dim gauss distribution", |
696 | 696 |
gauss_d) |
697 | 697 |
.optionGroup("dist", "gauss") |
698 | 698 |
// .mandatoryGroup("dist") |
699 | 699 |
.onlyOneGroup("dist") |
700 | 700 |
.boolOption("eps", "Also generate .eps output (prefix.eps)") |
701 | 701 |
.boolOption("nonodes", "Draw the edges only in the generated .eps") |
702 | 702 |
.boolOption("dir", "Directed digraph is generated (each arcs are replaced by two directed ones)") |
703 | 703 |
.boolOption("2con", "Create a two connected planar digraph") |
704 | 704 |
.optionGroup("alg","2con") |
705 | 705 |
.boolOption("tree", "Create a min. cost spanning tree") |
706 | 706 |
.optionGroup("alg","tree") |
707 | 707 |
.boolOption("tsp", "Create a TSP tour") |
708 | 708 |
.optionGroup("alg","tsp") |
709 | 709 |
.boolOption("tsp2", "Create a TSP tour (tree based)") |
710 | 710 |
.optionGroup("alg","tsp2") |
711 | 711 |
.boolOption("dela", "Delaunay triangulation digraph") |
712 | 712 |
.optionGroup("alg","dela") |
713 | 713 |
.onlyOneGroup("alg") |
714 | 714 |
.boolOption("rand", "Use time seed for random number generator") |
715 | 715 |
.optionGroup("rand", "rand") |
716 | 716 |
.intOption("seed", "Random seed", -1) |
717 | 717 |
.optionGroup("rand", "seed") |
718 | 718 |
.onlyOneGroup("rand") |
719 | 719 |
.other("[prefix]","Prefix of the output files. Default is 'lgf-gen-out'") |
720 | 720 |
.run(); |
721 | 721 |
|
722 | 722 |
if (ap["rand"]) { |
723 | 723 |
int seed = time(0); |
724 | 724 |
std::cout << "Random number seed: " << seed << std::endl; |
725 | 725 |
rnd = Random(seed); |
726 | 726 |
} |
727 | 727 |
if (ap.given("seed")) { |
728 | 728 |
int seed = ap["seed"]; |
729 | 729 |
std::cout << "Random number seed: " << seed << std::endl; |
730 | 730 |
rnd = Random(seed); |
731 | 731 |
} |
732 | 732 |
|
733 | 733 |
std::string prefix; |
734 | 734 |
switch(ap.files().size()) |
735 | 735 |
{ |
736 | 736 |
case 0: |
737 | 737 |
prefix="lgf-gen-out"; |
738 | 738 |
break; |
739 | 739 |
case 1: |
740 | 740 |
prefix=ap.files()[0]; |
741 | 741 |
break; |
742 | 742 |
default: |
743 | 743 |
std::cerr << "\nAt most one prefix can be given\n\n"; |
744 | 744 |
exit(1); |
745 | 745 |
} |
746 | 746 |
|
747 | 747 |
double sum_sizes=0; |
748 | 748 |
std::vector<double> sizes; |
749 | 749 |
std::vector<double> cum_sizes; |
750 | 750 |
for(int s=0;s<num_of_cities;s++) |
751 | 751 |
{ |
752 | 752 |
// sum_sizes+=rnd.exponential(); |
753 | 753 |
double d=rnd(); |
754 | 754 |
sum_sizes+=d; |
755 | 755 |
sizes.push_back(d); |
756 | 756 |
cum_sizes.push_back(sum_sizes); |
757 | 757 |
} |
758 | 758 |
int i=0; |
759 | 759 |
for(int s=0;s<num_of_cities;s++) |
760 | 760 |
{ |
761 | 761 |
Point center=(num_of_cities==1?Point(0,0):rnd.disc()); |
762 | 762 |
if(gauss_d) |
763 | 763 |
for(;i<N*(cum_sizes[s]/sum_sizes);i++) { |
764 | 764 |
Node n=g.addNode(); |
765 | 765 |
nodes.push_back(n); |
766 | 766 |
coords[n]=center+rnd.gauss2()*area* |
767 | 767 |
std::sqrt(sizes[s]/sum_sizes); |
768 | 768 |
} |
769 | 769 |
else if(square_d) |
770 | 770 |
for(;i<N*(cum_sizes[s]/sum_sizes);i++) { |
771 | 771 |
Node n=g.addNode(); |
772 | 772 |
nodes.push_back(n); |
773 | 773 |
coords[n]=center+Point(rnd()*2-1,rnd()*2-1)*area* |
774 | 774 |
std::sqrt(sizes[s]/sum_sizes); |
775 | 775 |
} |
776 | 776 |
else if(disc_d || true) |
777 | 777 |
for(;i<N*(cum_sizes[s]/sum_sizes);i++) { |
778 | 778 |
Node n=g.addNode(); |
779 | 779 |
nodes.push_back(n); |
780 | 780 |
coords[n]=center+rnd.disc()*area* |
781 | 781 |
std::sqrt(sizes[s]/sum_sizes); |
782 | 782 |
} |
783 | 783 |
} |
784 | 784 |
|
785 | 785 |
// for (ListGraph::NodeIt n(g); n != INVALID; ++n) { |
786 | 786 |
// std::cerr << coords[n] << std::endl; |
787 | 787 |
// } |
788 | 788 |
|
789 | 789 |
if(ap["tsp"]) { |
790 | 790 |
tsp(); |
791 | 791 |
std::cout << "#2-opt improvements: " << tsp_impr_num << std::endl; |
792 | 792 |
} |
793 | 793 |
if(ap["tsp2"]) { |
794 | 794 |
tsp2(); |
795 | 795 |
std::cout << "#2-opt improvements: " << tsp_impr_num << std::endl; |
796 | 796 |
} |
797 | 797 |
else if(ap["2con"]) { |
798 | 798 |
std::cout << "Make triangles\n"; |
799 | 799 |
// triangle(); |
800 | 800 |
sparseTriangle(ap["g"]); |
801 | 801 |
std::cout << "Make it sparser\n"; |
802 | 802 |
sparse2(ap["g"]); |
803 | 803 |
} |
804 | 804 |
else if(ap["tree"]) { |
805 | 805 |
minTree(); |
806 | 806 |
} |
807 | 807 |
else if(ap["dela"]) { |
808 | 808 |
delaunay(); |
809 | 809 |
} |
810 | 810 |
|
811 | 811 |
|
812 | 812 |
std::cout << "Number of nodes : " << countNodes(g) << std::endl; |
813 | 813 |
std::cout << "Number of arcs : " << countEdges(g) << std::endl; |
814 | 814 |
double tlen=0; |
815 | 815 |
for(EdgeIt e(g);e!=INVALID;++e) |
816 |
tlen+=sqrt((coords[g.v(e)]-coords[g.u(e)]).normSquare()); |
|
816 |
tlen+=std::sqrt((coords[g.v(e)]-coords[g.u(e)]).normSquare()); |
|
817 | 817 |
std::cout << "Total arc length : " << tlen << std::endl; |
818 | 818 |
|
819 | 819 |
if(ap["eps"]) |
820 | 820 |
graphToEps(g,prefix+".eps").scaleToA4(). |
821 | 821 |
scale(600).nodeScale(.005).arcWidthScale(.001).preScale(false). |
822 | 822 |
coords(coords).hideNodes(ap.given("nonodes")).run(); |
823 | 823 |
|
824 | 824 |
if(ap["dir"]) |
825 | 825 |
DigraphWriter<ListGraph>(g,prefix+".lgf"). |
826 | 826 |
nodeMap("coordinates_x",scaleMap(xMap(coords),600)). |
827 | 827 |
nodeMap("coordinates_y",scaleMap(yMap(coords),600)). |
828 | 828 |
run(); |
829 | 829 |
else GraphWriter<ListGraph>(g,prefix+".lgf"). |
830 | 830 |
nodeMap("coordinates_x",scaleMap(xMap(coords),600)). |
831 | 831 |
nodeMap("coordinates_y",scaleMap(yMap(coords),600)). |
832 | 832 |
run(); |
833 | 833 |
} |
834 | 834 |
|