Changes in / [999:00f8d9f9920d:1000:404b98971e1f] in lemon-main
Files: 8 added, 17 deleted, 28 edited
AUTHORS
(r951 → r992)

-The authors of the 1.x series are
+The main developers of release series 1.x are

 * Balazs Dezso <deba@inf.elte.hu>
…
 * Akos Ladanyi <ladanyi@tmit.bme.hu>

-For more details on the actual contribution, please visit the history
-of the main LEMON source repository: http://lemon.cs.elte.hu/hg/lemon
+For more complete list of contributors, please visit the history of
+the LEMON source code repository: http://lemon.cs.elte.hu/hg/lemon

-Moreover, this version is heavily based on the 0.x series of
-LEMON. Here is the list of people who contributed to those versions.
+Moreover, this version is heavily based on version 0.x of LEMON. Here
+is the list of people who contributed to those versions.

 * Mihaly Barasz <klao@cs.elte.hu>
CMakeLists.txt
(r998 → r1000)

 ELSE()
   EXECUTE_PROCESS(
-    COMMAND ${PYTHON_EXECUTABLE} ./scripts/chg-len.py
+    COMMAND
+      hg log -r. --template "{latesttag}"
     WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-    OUTPUT_VARIABLE HG_REVISION_PATH
+    OUTPUT_VARIABLE HG_REVISION_TAG
     ERROR_QUIET
     OUTPUT_STRIP_TRAILING_WHITESPACE
   )
   EXECUTE_PROCESS(
-    COMMAND hg id -i
+    COMMAND
+      hg log -r. --template "{latesttagdistance}"
     WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-    OUTPUT_VARIABLE HG_REVISION
+    OUTPUT_VARIABLE HG_REVISION_DIST
     ERROR_QUIET
     OUTPUT_STRIP_TRAILING_WHITESPACE
   )
-  IF(HG_REVISION STREQUAL "")
+  EXECUTE_PROCESS(
+    COMMAND
+      hg log -r. --template "{node|short}"
+    WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+    OUTPUT_VARIABLE HG_REVISION_ID
+    ERROR_QUIET
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+  )
+
+  IF(HG_REVISION_TAG STREQUAL "")
     SET(HG_REVISION_ID "hg-tip")
   ELSE()
-    IF(HG_REVISION_PATH STREQUAL "")
-      SET(HG_REVISION_ID ${HG_REVISION})
+    IF(HG_REVISION_TAG STREQUAL "null")
+      SET(HG_REVISION_TAG "trunk")
+    ELSEIF(HG_REVISION_TAG MATCHES "^r")
+      STRING(SUBSTRING ${HG_REVISION_TAG} 1 -1 HG_REVISION_TAG)
+    ENDIF()
+    IF(HG_REVISION_DIST STREQUAL "0")
+      SET(HG_REVISION ${HG_REVISION_TAG})
     ELSE()
-      SET(HG_REVISION_ID ${HG_REVISION_PATH}.${HG_REVISION})
+      SET(HG_REVISION
+        "${HG_REVISION_TAG}+${HG_REVISION_DIST}-${HG_REVISION_ID}")
     ENDIF()
   ENDIF()
-  SET(LEMON_VERSION ${HG_REVISION_ID} CACHE STRING "LEMON version string.")
+
+  SET(LEMON_VERSION ${HG_REVISION} CACHE STRING "LEMON version string.")
 ENDIF()
…
 SET(LEMON_HAVE_LONG_LONG ${HAVE_LONG_LONG})

-INCLUDE(FindPythonInterp)
+INCLUDE(FindThreads)
+
+IF(NOT LEMON_THREADING)
+  IF(CMAKE_USE_PTHREADS_INIT)
+    SET(LEMON_THREADING "Pthread")
+  ELSEIF(CMAKE_USE_WIN32_THREADS_INIT)
+    SET(LEMON_THREADING "Win32")
+  ELSE()
+    SET(LEMON_THREADING "None")
+  ENDIF()
+ENDIF()
+
+SET( LEMON_THREADING "${LEMON_THREADING}" CACHE STRING
+  "Choose the threading library, options are: Pthread Win32 None."
+  FORCE )
+
+IF(LEMON_THREADING STREQUAL "Pthread")
+  SET(LEMON_USE_PTHREAD TRUE)
+ELSEIF(LEMON_THREADING STREQUAL "Win32")
+  SET(LEMON_USE_WIN32_THREADS TRUE)
+ENDIF()

 ENABLE_TESTING()
…
 ADD_SUBDIRECTORY(lemon)
 IF(${CMAKE_SOURCE_DIR} STREQUAL ${PROJECT_SOURCE_DIR})
+  ADD_SUBDIRECTORY(contrib)
   ADD_SUBDIRECTORY(demo)
   ADD_SUBDIRECTORY(tools)
…
 ENDIF()

+CONFIGURE_FILE(
+  ${PROJECT_SOURCE_DIR}/cmake/version.cmake.in
+  ${PROJECT_BINARY_DIR}/cmake/version.cmake
+  @ONLY
+)
+
+SET(ARCHIVE_BASE_NAME ${CMAKE_PROJECT_NAME})
+STRING(TOLOWER ${ARCHIVE_BASE_NAME} ARCHIVE_BASE_NAME)
+SET(ARCHIVE_NAME ${ARCHIVE_BASE_NAME}-${PROJECT_VERSION})
+ADD_CUSTOM_TARGET(dist
+  COMMAND cmake -E remove_directory ${ARCHIVE_NAME}
+  COMMAND hg archive ${ARCHIVE_NAME}
+  COMMAND cmake -E copy cmake/version.cmake ${ARCHIVE_NAME}/cmake/version.cmake
+  COMMAND tar -czf ${ARCHIVE_BASE_NAME}-nodoc-${PROJECT_VERSION}.tar.gz ${ARCHIVE_NAME}
+  COMMAND zip -r ${ARCHIVE_BASE_NAME}-nodoc-${PROJECT_VERSION}.zip ${ARCHIVE_NAME}
+  COMMAND cmake -E copy_directory doc/html ${ARCHIVE_NAME}/doc/html
+  COMMAND tar -czf ${ARCHIVE_NAME}.tar.gz ${ARCHIVE_NAME}
+  COMMAND zip -r ${ARCHIVE_NAME}.zip ${ARCHIVE_NAME}
+  COMMAND cmake -E copy_directory doc/html ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  COMMAND tar -czf ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}.tar.gz ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  COMMAND zip -r ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}.zip ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  COMMAND cmake -E remove_directory ${ARCHIVE_NAME}
+  COMMAND cmake -E remove_directory ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  DEPENDS html
+  WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
+
+# CPACK config (Basically for NSIS)
 IF(${CMAKE_SOURCE_DIR} STREQUAL ${PROJECT_SOURCE_DIR})
   SET(CPACK_PACKAGE_NAME ${PROJECT_NAME})
INSTALL
(r824 → r992)

 =========================

-Since you are reading this I assume you already obtained one of the release
-tarballs and successfully extracted it. The latest version of LEMON is
-available at our web page (http://lemon.cs.elte.hu/).
+This file contains instructions for building and installing LEMON from
+source on Linux. The process on Windows is similar.

-LEMON provides two different build environments, one is based on "autotool",
-while the other is based on "cmake". This file contains instructions only for
-the former one, which is the recommended build environment on Linux, Mac OSX
-and other unices or if you use Cygwin on Windows. For cmake installation
-instructions visit http://lemon.cs.elte.hu.
+Note that it is not necessary to install LEMON in order to use
+it. Instead, you can easily integrate it with your own code
+directly. For instructions, see
+https://lemon.cs.elte.hu/trac/lemon/wiki/HowToCompile

 In order to install LEMON from the extracted source tarball you have to
 issue the following commands:

-1. `cd lemon-x.y.z'
+1. Step into the root of the source directory.

-   This command changes to the directory which was created when you
-   extracted the sources. The x.y.z part is a version number.
+   $ cd lemon-x.y.z

-2. `./configure'
+2. Create a build subdirectory and step into it.

-   This command runs the configure shell script, which does some checks and
-   creates the makefiles.
+   $ mkdir build
+   $ cd build

-3. `make'
+3. Perform system checks and create the makefiles.

-   This command compiles the non-template part of LEMON into libemon.a
-   file. It also compiles the programs in the tools subdirectory by
-   default.
+   $ cmake ..

-4. `make check'
+4. Build LEMON.

-   This step is optional, but recommended. It runs the test programs that
-   we developed for LEMON to check whether the library works properly on
-   your platform.
+   $ make

-5. `make install'
+   This command compiles the non-template part of LEMON into
+   libemon.a file. It also compiles the programs in the 'tools' and
+   'demo' subdirectories.
+
+5. [Optional] Compile and run the self-tests.
+
+   $ make check
+
+5. [Optional] Generate the user documentation.
+
+   $ make html
+
+   The release tarballs already include the documentation.
+
+   Note that for this step you need to have the following tools
+   installed: Python, Doxygen, Graphviz, Ghostscript, LaTeX.
+
+6. [Optional] Install LEMON
+
+   $ make install

    This command installs LEMON under /usr/local (you will need root
-   privileges to be able to do that). If you want to install it to some
-   other location, then pass the --prefix=DIRECTORY flag to configure in
-   step 2. For example: `./configure --prefix=/home/username/lemon'.
-
-6. `make install-html'
-
-   This command installs the documentation under share/doc/lemon/docs. The
-   generated documentation is included in the tarball. If you want to
-   generate it yourself, then run `make html'. Note that for this you need
-   to have the following programs installed: Doxygen, Graphviz, Ghostscript,
-   Latex.
+   privileges to be able to do that). If you want to install it to
+   some other location, then pass the
+   -DCMAKE_INSTALL_PREFIX=DIRECTORY flag to cmake in Step 3.
+   For example:
+
+   $ cmake -DCMAKE_INSTALL_PREFIX=/home/username/lemon

 Configure Options and Variables
 ===============================

-In step 2 you can customize the actions of configure by setting variables
-and passing options to it. This can be done like this:
-`./configure [OPTION]... [VARIABLE=VALUE]...'
+In Step 3, you can customize the build process by passing options to CMAKE.

-Below you will find some useful variables and options (see `./configure --help'
-for more):
+  $ cmake [OPTIONS] ..

-CXX='comp'
+You find a list of the most useful options below.

-  Change the C++ compiler to 'comp'.
-
-CXXFLAGS='flags'
-
-  Pass the 'flags' to the compiler. For example CXXFLAGS='-O3 -march=pentium-m'
-  turns on generation of aggressively optimized Pentium-M specific code.
-
---prefix=PREFIX
+-DCMAKE_INSTALL_PREFIX=PREFIX

   Set the installation prefix to PREFIX. By default it is /usr/local.

---enable-tools
+-DCMAKE_BUILD_TYPE=[Release|Debug|Maintainer|...]

-  Build the programs in the tools subdirectory (default).
+  This sets the compiler options. The choices are the following

---disable-tools
+  'Release': A strong optimization is turned on (-O3 with gcc). This
+  is the default setting and we strongly recommend using this for
+  the final compilation.

-  Do not build the programs in the tools subdirectory.
+  'Debug': Optimization is turned off and debug info is added (-O0
+  -ggdb with gcc). It is recommended during the development.

---with-glpk[=PREFIX]
+  'Maintainer': The same as 'Debug' but the compiler warnings are
+  converted to errors (-Werror with gcc). In addition, 'make' will
+  also automatically compile and execute the test codes. It is the
+  best way of ensuring that LEMON codebase is clean and safe.

-  Enable GLPK support (default). You should specify the prefix too if
-  you installed GLPK to some non-standard location (e.g. your home
-  directory). If it is not found, GLPK support will be disabled.
+  'RelWithDebInfo': Optimized build with debug info.

---with-glpk-includedir=DIR
+  'MinSizeRel': Size optimized build (-Os with gcc)

-  The directory where the GLPK header files are located. This is only
-  useful when the GLPK headers and libraries are not under the same
-  prefix (which is unlikely).
+-DTEST_WITH_VALGRIND=YES

---with-glpk-libdir=DIR
+  Using this, the test codes will be executed using valgrind. It is a
+  very effective way of identifying indexing problems and memory leaks.

-  The directory where the GLPK libraries are located. This is only
-  useful when the GLPK headers and libraries are not under the same
-  prefix (which is unlikely).
+-DCMAKE_CXX_COMPILER=path-to-compiler

---without-glpk
+  Change the compiler to be used.

-  Disable GLPK support.
+-DBUILD_SHARED_LIBS=TRUE

---with-cplex[=PREFIX]
+  Build shared library instead of static one. Think twice if you
+  really want to use this option.

-  Enable CPLEX support (default). You should specify the prefix too
-  if you installed CPLEX to some non-standard location
-  (e.g. /opt/ilog/cplex75). If it is not found, CPLEX support will be
-  disabled.
+-DGLPK_ROOT_DIR=DIRECTORY
+-DCOIN_ROOT_DIR=DIRECTORY
+-DCPLEX_ROOT_DIR=DIRECTORY

---with-cplex-includedir=DIR
-
-  The directory where the CPLEX header files are located. This is
-  only useful when the CPLEX headers and libraries are not under the
-  same prefix (e.g. /usr/local/cplex/cplex75/include).
-
---with-cplex-libdir=DIR
-
-  The directory where the CPLEX libraries are located. This is only
-  useful when the CPLEX headers and libraries are not under the same
-  prefix (e.g.
-  /usr/local/cplex/cplex75/lib/i86_linux2_glibc2.2_gcc3.0/static_pic_mt).
-
---without-cplex
-
-  Disable CPLEX support.
-
---with-soplex[=PREFIX]
-
-  Enable SoPlex support (default). You should specify the prefix too if
-  you installed SoPlex to some non-standard location (e.g. your home
-  directory). If it is not found, SoPlex support will be disabled.
-
---with-soplex-includedir=DIR
-
-  The directory where the SoPlex header files are located. This is only
-  useful when the SoPlex headers and libraries are not under the same
-  prefix (which is unlikely).
-
---with-soplex-libdir=DIR
-
-  The directory where the SoPlex libraries are located. This is only
-  useful when the SoPlex headers and libraries are not under the same
-  prefix (which is unlikely).
-
---without-soplex
-
-  Disable SoPlex support.
-
---with-coin[=PREFIX]
-
-  Enable support for COIN-OR solvers (CLP and CBC). You should
-  specify the prefix too. (by default, COIN-OR tools install
-  themselves to the source code directory). This command enables the
-  solvers that are actually found.
-
---with-coin-includedir=DIR
-
-  The directory where the COIN-OR header files are located. This is
-  only useful when the COIN-OR headers and libraries are not under
-  the same prefix (which is unlikely).
-
---with-coin-libdir=DIR
-
-  The directory where the COIN-OR libraries are located. This is only
-  useful when the COIN-OR headers and libraries are not under the
-  same prefix (which is unlikely).
-
---without-coin
-
-  Disable COIN-OR support.
-
+  Install root directory prefixes of optional third party libraries.

 Makefile Variables
 ==================

-Some Makefile variables are reserved by the GNU Coding Standards for
-the use of the "user" - the person building the package. For instance,
-CXX and CXXFLAGS are such variables, and have the same meaning as
-explained in the previous section. These variables can be set on the
-command line when invoking `make' like this:
-`make [VARIABLE=VALUE]...'
+make VERBOSE=1

-WARNINGCXXFLAGS is a non-standard Makefile variable introduced by us
-to hold several compiler flags related to warnings. Its default value
-can be overridden when invoking `make'. For example to disable all
-warning flags use `make WARNINGCXXFLAGS='.
-
-In order to turn off a single flag from the default set of warning
-flags, you can use the CXXFLAGS variable, since this is passed after
-WARNINGCXXFLAGS. For example to turn off `-Wold-style-cast' (which is
-used by default when g++ is detected) you can use
-`make CXXFLAGS="-g -O2 -Wno-old-style-cast"'.
+  This results in a more verbose output by showing the full
+  compiler and linker commands.
LICENSE
(r879 → r992)

 copyright/license.

-Copyright (C) 2003-2010 Egervary Jeno Kombinatorikus Optimalizalasi
+Copyright (C) 2003-2012 Egervary Jeno Kombinatorikus Optimalizalasi
 Kutatocsoport (Egervary Combinatorial Optimization Research Group,
 EGRES).
cmake/version.cmake.in
(r678 → r983)

-SET(LEMON_VERSION "@PACKAGE_VERSION@" CACHE STRING "LEMON version string.")
+SET(LEMON_VERSION "@LEMON_VERSION@" CACHE STRING "LEMON version string.")
doc/CMakeLists.txt
(r964 → r983)

   @ONLY
 )
+
+# Copy doc from source (if exists)
+IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/html AND
+   NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/html/index.html)
+  MESSAGE(STATUS "Copy doc from source tree")
+  EXECUTE_PROCESS(
+    COMMAND cmake -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/html ${CMAKE_CURRENT_BINARY_DIR}/html
+  )
+ENDIF()

 IF(DOXYGEN_EXECUTABLE AND PYTHONINTERP_FOUND AND GHOSTSCRIPT_EXECUTABLE)
doc/Doxyfile.in
(r964 → r966)

   "@abs_top_srcdir@/lemon/concepts" \
   "@abs_top_srcdir@/demo" \
+  "@abs_top_srcdir@/contrib" \
   "@abs_top_srcdir@/tools" \
   "@abs_top_srcdir@/test/test_tools.h" \
doc/coding_style.dox
(r440 → r919)

 \subsection pri-loc-var Private member variables

-Private member variables should start with underscore
+Private member variables should start with underscore.

 \code
-_start_with_underscores
+_start_with_underscore
 \endcode
doc/dirs.dox
(r440 → r925)

 documentation.
 */
+
+/**
+\dir contrib
+\brief Directory for user contributed source codes.
+
+You can place your own C++ code using LEMON into this directory, which
+will compile to an executable along with LEMON when you build the
+library. This is probably the easiest way of compiling short to medium
+codes, for this does require neither a LEMON installed system-wide nor
+adding several paths to the compiler.
+
+Please have a look at <tt>contrib/CMakeLists.txt</tt> for
+instruction on how to add your own files into the build process. */

 /**
doc/groups.dox
(r879 → r919)

 strongly polynomial \ref klein67primal, \ref goldberg89cyclecanceling.

-In general NetworkSimplex is the most efficient implementation,
-but in special cases other algorithms could be faster.
+In general, \ref NetworkSimplex and \ref CostScaling are the most efficient
+implementations, but the other two algorithms could be faster in special cases.
 For example, if the total supply and/or capacities are rather small,
-CapacityScaling is usually the fastest algorithm (without effective scaling).
+\ref CapacityScaling is usually the fastest algorithm (without effective scaling).
 */
…
 \ref dasdan98minmeancycle.

-In practice, the \ref HowardMmc "Howard" algorithm proved to be by far the
+In practice, the \ref HowardMmc "Howard" algorithm turned out to be by far the
 most efficient one, though the best known theoretical bound on its running
 time is exponential.
…
 /**
-@defgroup planar Planarity Embedding and Drawing
+@defgroup planar Planar Embedding and Drawing
 @ingroup algs
 \brief Algorithms for planarity checking, embedding and drawing
…
 /**
-@defgroup approx Approximation Algorithms
+@defgroup approx_algs Approximation Algorithms
 @ingroup algs
 \brief Approximation algorithms.

 This group contains the approximation and heuristic algorithms
 implemented in LEMON.
+
+<b>Maximum Clique Problem</b>
+ - \ref GrossoLocatelliPullanMc An efficient heuristic algorithm of
+   Grosso, Locatelli, and Pullan.
 */
doc/references.bib
(r755 → r904)

   address =      {Dublin, Ireland},
   year =         1991,
-  month =        sep,
-}
+  month =        sep
+}
+
+%%%%% Other algorithms %%%%%
+
+@article{grosso08maxclique,
+  author =       {Andrea Grosso and Marco Locatelli and Wayne Pullan},
+  title =        {Simple ingredients leading to very efficient
+                  heuristics for the maximum clique problem},
+  journal =      {Journal of Heuristics},
+  year =         2008,
+  volume =       14,
+  number =       6,
+  pages =        {587--612}
+}
lemon/CMakeLists.txt
(r968 → r981)

 CONFIGURE_FILE(
-  ${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake
+  ${CMAKE_CURRENT_SOURCE_DIR}/config.h.in
   ${CMAKE_CURRENT_BINARY_DIR}/config.h
 )

 CONFIGURE_FILE(
-  ${CMAKE_CURRENT_SOURCE_DIR}/lemon.pc.cmake
+  ${CMAKE_CURRENT_SOURCE_DIR}/lemon.pc.in
   ${CMAKE_CURRENT_BINARY_DIR}/lemon.pc
   @ONLY
lemon/bits/alteration_notifier.h
(r440 → r979)

 #include <lemon/core.h>
+#include <lemon/bits/lock.h>

 //\ingroup graphbits
…
     typedef std::list<ObserverBase*> Observers;
     Observers _observers;
+    lemon::bits::Lock _lock;

   public:
…
     void attach(ObserverBase& observer) {
+      _lock.lock();
       observer._index = _observers.insert(_observers.begin(), &observer);
       observer._notifier = this;
+      _lock.unlock();
     }

     void detach(ObserverBase& observer) {
+      _lock.lock();
       _observers.erase(observer._index);
       observer._index = _observers.end();
       observer._notifier = 0;
+      _lock.unlock();
     }
lemon/bits/windows.cc
(r941 → r979)

 #endif
     }
+
+    WinLock::WinLock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = new CRITICAL_SECTION;
+      InitializeCriticalSection(lock);
+      _repr = lock;
+#endif
+    }
+
+    WinLock::~WinLock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = static_cast<CRITICAL_SECTION*>(_repr);
+      DeleteCriticalSection(lock);
+      delete lock;
+#endif
+    }
+
+    void WinLock::lock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = static_cast<CRITICAL_SECTION*>(_repr);
+      EnterCriticalSection(lock);
+#endif
+    }
+
+    void WinLock::unlock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = static_cast<CRITICAL_SECTION*>(_repr);
+      LeaveCriticalSection(lock);
+#endif
+    }
   }
 }
lemon/bits/windows.h
(r529 → r979)

     std::string getWinFormattedDate();
     int getWinRndSeed();
+
+    class WinLock {
+    public:
+      WinLock();
+      ~WinLock();
+      void lock();
+      void unlock();
+    private:
+      void *_repr;
+    };
   }
 }
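The new WinLock class (and the lemon::bits::Lock it provides a Windows backend for) follows a plain lock()/unlock() protocol around a critical section, exactly as the attach()/detach() change in alteration_notifier.h above uses it. A minimal sketch of that pattern, using std::mutex as a portable stand-in instead of LEMON's internal classes (ObserverList and its members are hypothetical names, not LEMON code):

  // Sketch only: the lock()/unlock() pattern from the changeset, with
  // std::mutex playing the role of lemon::bits::Lock / WinLock.
  #include <list>
  #include <mutex>

  class ObserverList {
  public:
    void attach(int* observer) {
      _lock.lock();                    // enter the critical section
      _observers.push_back(observer);  // mutate shared state safely
      _lock.unlock();                  // leave the critical section
    }
    void detach(int* observer) {
      _lock.lock();
      _observers.remove(observer);
      _lock.unlock();
    }
  private:
    std::list<int*> _observers;
    std::mutex _lock;                  // stand-in for lemon::bits::Lock
  };

  int main() {
    ObserverList observers;
    int x = 0;
    observers.attach(&x);
    observers.detach(&x);
    return 0;
  }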
lemon/capacity_scaling.h
(r877 → r985)

   /// consider to use the named template parameters instead.
   ///
-  /// \warning Both number types must be signed and all input data must
-  /// be integer.
-  /// \warning This algorithm does not support negative costs for such
-  /// arcs that have infinite upper bound.
+  /// \warning Both \c V and \c C must be signed number types.
+  /// \warning Capacity bounds and supply values must be integer, but
+  /// arc costs can be arbitrary real numbers.
+  /// \warning This algorithm does not support negative costs for
+  /// arcs having infinite upper bound.
 #ifdef DOXYGEN
   template <typename GR, typename V, typename C, typename TR>
…
     ///
     /// Using this function has the same effect as using \ref supplyMap()
-    /// with such a map in which \c k is assigned to \c s, \c -k is
+    /// with a map in which \c k is assigned to \c s, \c -k is
     /// assigned to \c t and all other nodes have zero supply value.
     ///
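A small usage sketch of the interface documented above, reflecting the revised warning that only capacity bounds and supply values must be integer for CapacityScaling while arc costs may be real numbers. The two-node digraph and all numeric values are invented for illustration, and totalCost() is assumed from the rest of the class rather than shown in this hunk:

  #include <lemon/list_graph.h>
  #include <lemon/capacity_scaling.h>
  #include <iostream>

  using namespace lemon;

  int main() {
    ListDigraph g;
    ListDigraph::Node s = g.addNode();
    ListDigraph::Node t = g.addNode();
    ListDigraph::Arc a = g.addArc(s, t);

    ListDigraph::ArcMap<int> capacity(g);     // integer capacities
    ListDigraph::ArcMap<double> cost(g);      // real-valued costs
    capacity[a] = 10;
    cost[a] = 2.5;

    // V = int (flow values), C = double (costs)
    CapacityScaling<ListDigraph, int, double> cs(g);
    cs.upperMap(capacity).costMap(cost).stSupply(s, t, 5);  // send 5 units s -> t

    if (cs.run() == CapacityScaling<ListDigraph, int, double>::OPTIMAL) {
      std::cout << "total cost: " << cs.totalCost() << std::endl;
    }
    return 0;
  }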
lemon/config.h.in
(r678 → r981)

-/* The version string */
-#undef LEMON_VERSION
-
-/* Define to 1 if you have long long */
-#undef LEMON_HAVE_LONG_LONG
-
-/* Define to 1 if you have any LP solver. */
-#undef LEMON_HAVE_LP
-
-/* Define to 1 if you have any MIP solver. */
-#undef LEMON_HAVE_MIP
-
-/* Define to 1 if you have CPLEX. */
-#undef LEMON_HAVE_CPLEX
-
-/* Define to 1 if you have GLPK. */
-#undef LEMON_HAVE_GLPK
-
-/* Define to 1 if you have SOPLEX */
-#undef LEMON_HAVE_SOPLEX
-
-/* Define to 1 if you have CLP */
-#undef LEMON_HAVE_CLP
-
-/* Define to 1 if you have CBC */
-#undef LEMON_HAVE_CBC
+#define LEMON_VERSION "@PROJECT_VERSION@"
+#cmakedefine LEMON_HAVE_LONG_LONG 1
+#cmakedefine LEMON_HAVE_LP 1
+#cmakedefine LEMON_HAVE_MIP 1
+#cmakedefine LEMON_HAVE_GLPK 1
+#cmakedefine LEMON_HAVE_CPLEX 1
+#cmakedefine LEMON_HAVE_CLP 1
+#cmakedefine LEMON_HAVE_CBC 1
+#cmakedefine LEMON_USE_PTHREAD 1
+#cmakedefine LEMON_USE_WIN32_THREADS 1
lemon/core.h
(r998 → r1000)

   }
+
+  /// \brief Check whether a graph is undirected.
+  ///
+  /// This function returns \c true if the given graph is undirected.
+#ifdef DOXYGEN
+  template <typename GR>
+  bool undirected(const GR& g) { return false; }
+#else
+  template <typename GR>
+  typename enable_if<UndirectedTagIndicator<GR>, bool>::type
+  undirected(const GR&) {
+    return true;
+  }
+  template <typename GR>
+  typename disable_if<UndirectedTagIndicator<GR>, bool>::type
+  undirected(const GR&) {
+    return false;
+  }
+#endif

   /// \brief Class to copy a digraph.
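The new undirected() utility dispatches at compile time on UndirectedTagIndicator, so it can be used as a simple type query. A short usage sketch, assuming a LEMON build that already contains this changeset:

  #include <lemon/list_graph.h>
  #include <iostream>

  int main() {
    lemon::ListDigraph dg;   // a directed graph type
    lemon::ListGraph ug;     // an undirected graph type

    std::cout << std::boolalpha
              << lemon::undirected(dg) << ' '        // prints: false
              << lemon::undirected(ug) << std::endl; // prints: true
    return 0;
  }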
lemon/cost_scaling.h
r931 r938 98 98 /// "preflow push-relabel" algorithm for the maximum flow problem. 99 99 /// 100 /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest 101 /// implementations available in LEMON for this problem. 102 /// 100 103 /// Most of the parameters of the problem (except for the digraph) 101 104 /// can be given using separate functions, and the algorithm can be … … 114 117 /// consider to use the named template parameters instead. 115 118 /// 116 /// \warning Both number types must be signed and all input data must 119 /// \warning Both \c V and \c C must be signed number types. 120 /// \warning All input data (capacities, supply values, and costs) must 117 121 /// be integer. 118 /// \warning This algorithm does not support negative costs for such119 /// arcs that haveinfinite upper bound.122 /// \warning This algorithm does not support negative costs for 123 /// arcs having infinite upper bound. 120 124 /// 121 125 /// \note %CostScaling provides three different internal methods, … … 179 183 /// relabel operation. 180 184 /// By default, the so called \ref PARTIAL_AUGMENT 181 /// "Partial Augment-Relabel" method is used, which provedto be185 /// "Partial Augment-Relabel" method is used, which turned out to be 182 186 /// the most efficient and the most robust on various test inputs. 183 187 /// However, the other methods can be selected using the \ref run() … … 234 238 }; 235 239 236 typedef StaticVectorMap<StaticDigraph::Node, LargeCost> LargeCostNodeMap;237 240 typedef StaticVectorMap<StaticDigraph::Arc, LargeCost> LargeCostArcMap; 238 241 … … 285 288 int _max_rank; 286 289 287 // Data for a StaticDigraph structure288 typedef std::pair<int, int> IntPair;289 StaticDigraph _sgr;290 std::vector<IntPair> _arc_vec;291 std::vector<LargeCost> _cost_vec;292 LargeCostArcMap _cost_map;293 LargeCostNodeMap _pi_map;294 295 290 public: 296 291 … … 339 334 CostScaling(const GR& graph) : 340 335 _graph(graph), _node_id(graph), _arc_idf(graph), _arc_idb(graph), 341 _cost_map(_cost_vec), _pi_map(_pi),342 336 INF(std::numeric_limits<Value>::has_infinity ? 343 337 std::numeric_limits<Value>::infinity() : … … 448 442 /// 449 443 /// Using this function has the same effect as using \ref supplyMap() 450 /// with sucha map in which \c k is assigned to \c s, \c -k is444 /// with a map in which \c k is assigned to \c s, \c -k is 451 445 /// assigned to \c t and all other nodes have zero supply value. 452 446 /// … … 494 488 /// \param method The internal method that will be used in the 495 489 /// algorithm. For more information, see \ref Method. 496 /// \param factor The cost scaling factor. It must be larger than one.490 /// \param factor The cost scaling factor. It must be at least two. 497 491 /// 498 492 /// \return \c INFEASIBLE if no feasible flow exists, … … 508 502 /// \see ProblemType, Method 509 503 /// \see resetParams(), reset() 510 ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 8) { 504 ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 16) { 505 LEMON_ASSERT(factor >= 2, "The scaling factor must be at least 2"); 511 506 _alpha = factor; 512 507 ProblemType pt = init(); … … 572 567 } 573 568 574 /// \brief Reset all the parameters that have been given before. 575 /// 576 /// This function resets all the paramaters that have been given 577 /// before using functions \ref lowerMap(), \ref upperMap(), 578 /// \ref costMap(), \ref supplyMap(), \ref stSupply(). 579 /// 580 /// It is useful for multiple run() calls. 
If this function is not 581 /// used, all the parameters given before are kept for the next 582 /// \ref run() call. 583 /// However, the underlying digraph must not be modified after this 584 /// class have been constructed, since it copies and extends the graph. 569 /// \brief Reset the internal data structures and all the parameters 570 /// that have been given before. 571 /// 572 /// This function resets the internal data structures and all the 573 /// paramaters that have been given before using functions \ref lowerMap(), 574 /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(). 575 /// 576 /// It is useful for multiple \ref run() calls. By default, all the given 577 /// parameters are kept for the next \ref run() call, unless 578 /// \ref resetParams() or \ref reset() is used. 579 /// If the underlying digraph was also modified after the construction 580 /// of the class or the last \ref reset() call, then the \ref reset() 581 /// function must be used, otherwise \ref resetParams() is sufficient. 582 /// 583 /// See \ref resetParams() for examples. 584 /// 585 585 /// \return <tt>(*this)</tt> 586 /// 587 /// \see resetParams(), run() 586 588 CostScaling& reset() { 587 589 // Resize vectors … … 608 610 _excess.resize(_res_node_num); 609 611 _next_out.resize(_res_node_num); 610 611 _arc_vec.reserve(_res_arc_num);612 _cost_vec.reserve(_res_arc_num);613 612 614 613 // Copy the graph … … 887 886 } 888 887 889 return OPTIMAL;890 }891 892 // Execute the algorithm and transform the results893 void start(Method method) {894 // Maximum path length for partial augment895 const int MAX_PATH_LENGTH = 4;896 897 888 // Initialize data structures for buckets 898 889 _max_rank = _alpha * _res_node_num; … … 902 893 _rank.resize(_res_node_num + 1); 903 894 904 // Execute the algorithm 895 return OPTIMAL; 896 } 897 898 // Execute the algorithm and transform the results 899 void start(Method method) { 900 const int MAX_PARTIAL_PATH_LENGTH = 4; 901 905 902 switch (method) { 906 903 case PUSH: … … 911 908 break; 912 909 case PARTIAL_AUGMENT: 913 startAugment(MAX_PA TH_LENGTH);910 startAugment(MAX_PARTIAL_PATH_LENGTH); 914 911 break; 915 912 } 916 913 917 // Compute node potentials for the original costs 918 _arc_vec.clear(); 919 _cost_vec.clear(); 920 for (int j = 0; j != _res_arc_num; ++j) { 921 if (_res_cap[j] > 0) { 922 _arc_vec.push_back(IntPair(_source[j], _target[j])); 923 _cost_vec.push_back(_scost[j]); 924 } 925 } 926 _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end()); 927 928 typename BellmanFord<StaticDigraph, LargeCostArcMap> 929 ::template SetDistMap<LargeCostNodeMap>::Create bf(_sgr, _cost_map); 930 bf.distMap(_pi_map); 931 bf.init(0); 932 bf.start(); 914 // Compute node potentials (dual solution) 915 for (int i = 0; i != _res_node_num; ++i) { 916 _pi[i] = static_cast<Cost>(_pi[i] / (_res_node_num * _alpha)); 917 } 918 bool optimal = true; 919 for (int i = 0; optimal && i != _res_node_num; ++i) { 920 LargeCost pi_i = _pi[i]; 921 int last_out = _first_out[i+1]; 922 for (int j = _first_out[i]; j != last_out; ++j) { 923 if (_res_cap[j] > 0 && _scost[j] + pi_i - _pi[_target[j]] < 0) { 924 optimal = false; 925 break; 926 } 927 } 928 } 929 930 if (!optimal) { 931 // Compute node potentials for the original costs with BellmanFord 932 // (if it is necessary) 933 typedef std::pair<int, int> IntPair; 934 StaticDigraph sgr; 935 std::vector<IntPair> arc_vec; 936 std::vector<LargeCost> cost_vec; 937 LargeCostArcMap cost_map(cost_vec); 938 939 arc_vec.clear(); 940 
cost_vec.clear(); 941 for (int j = 0; j != _res_arc_num; ++j) { 942 if (_res_cap[j] > 0) { 943 int u = _source[j], v = _target[j]; 944 arc_vec.push_back(IntPair(u, v)); 945 cost_vec.push_back(_scost[j] + _pi[u] - _pi[v]); 946 } 947 } 948 sgr.build(_res_node_num, arc_vec.begin(), arc_vec.end()); 949 950 typename BellmanFord<StaticDigraph, LargeCostArcMap>::Create 951 bf(sgr, cost_map); 952 bf.init(0); 953 bf.start(); 954 955 for (int i = 0; i != _res_node_num; ++i) { 956 _pi[i] += bf.dist(sgr.node(i)); 957 } 958 } 959 960 // Shift potentials to meet the requirements of the GEQ type 961 // optimality conditions 962 LargeCost max_pot = _pi[_root]; 963 for (int i = 0; i != _res_node_num; ++i) { 964 if (_pi[i] > max_pot) max_pot = _pi[i]; 965 } 966 if (max_pot != 0) { 967 for (int i = 0; i != _res_node_num; ++i) { 968 _pi[i] -= max_pot; 969 } 970 } 933 971 934 972 // Handle non-zero lower bounds … … 948 986 LargeCost pi_u = _pi[u]; 949 987 for (int a = _first_out[u]; a != last_out; ++a) { 950 int v = _target[a]; 951 if (_res_cap[a] > 0 && _cost[a] + pi_u - _pi[v] < 0) { 952 Value delta = _res_cap[a]; 953 _excess[u] -= delta; 954 _excess[v] += delta; 955 _res_cap[a] = 0; 956 _res_cap[_reverse[a]] += delta; 988 Value delta = _res_cap[a]; 989 if (delta > 0) { 990 int v = _target[a]; 991 if (_cost[a] + pi_u - _pi[v] < 0) { 992 _excess[u] -= delta; 993 _excess[v] += delta; 994 _res_cap[a] = 0; 995 _res_cap[_reverse[a]] += delta; 996 } 957 997 } 958 998 } … … 970 1010 } 971 1011 972 // Early termination heuristic 973 bool earlyTermination() { 974 const double EARLY_TERM_FACTOR = 3.0; 975 976 // Build a static residual graph 977 _arc_vec.clear(); 978 _cost_vec.clear(); 979 for (int j = 0; j != _res_arc_num; ++j) { 980 if (_res_cap[j] > 0) { 981 _arc_vec.push_back(IntPair(_source[j], _target[j])); 982 _cost_vec.push_back(_cost[j] + 1); 983 } 984 } 985 _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end()); 986 987 // Run Bellman-Ford algorithm to check if the current flow is optimal 988 BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map); 989 bf.init(0); 990 bool done = false; 991 int K = int(EARLY_TERM_FACTOR * std::sqrt(double(_res_node_num))); 992 for (int i = 0; i < K && !done; ++i) { 993 done = bf.processNextWeakRound(); 994 } 995 return done; 1012 // Price (potential) refinement heuristic 1013 bool priceRefinement() { 1014 1015 // Stack for stroing the topological order 1016 IntVector stack(_res_node_num); 1017 int stack_top; 1018 1019 // Perform phases 1020 while (topologicalSort(stack, stack_top)) { 1021 1022 // Compute node ranks in the acyclic admissible network and 1023 // store the nodes in buckets 1024 for (int i = 0; i != _res_node_num; ++i) { 1025 _rank[i] = 0; 1026 } 1027 const int bucket_end = _root + 1; 1028 for (int r = 0; r != _max_rank; ++r) { 1029 _buckets[r] = bucket_end; 1030 } 1031 int top_rank = 0; 1032 for ( ; stack_top >= 0; --stack_top) { 1033 int u = stack[stack_top], v; 1034 int rank_u = _rank[u]; 1035 1036 LargeCost rc, pi_u = _pi[u]; 1037 int last_out = _first_out[u+1]; 1038 for (int a = _first_out[u]; a != last_out; ++a) { 1039 if (_res_cap[a] > 0) { 1040 v = _target[a]; 1041 rc = _cost[a] + pi_u - _pi[v]; 1042 if (rc < 0) { 1043 LargeCost nrc = static_cast<LargeCost>((-rc - 0.5) / _epsilon); 1044 if (nrc < LargeCost(_max_rank)) { 1045 int new_rank_v = rank_u + static_cast<int>(nrc); 1046 if (new_rank_v > _rank[v]) { 1047 _rank[v] = new_rank_v; 1048 } 1049 } 1050 } 1051 } 1052 } 1053 1054 if (rank_u > 0) { 1055 top_rank = std::max(top_rank, rank_u); 
1056 int bfirst = _buckets[rank_u]; 1057 _bucket_next[u] = bfirst; 1058 _bucket_prev[bfirst] = u; 1059 _buckets[rank_u] = u; 1060 } 1061 } 1062 1063 // Check if the current flow is epsilon-optimal 1064 if (top_rank == 0) { 1065 return true; 1066 } 1067 1068 // Process buckets in top-down order 1069 for (int rank = top_rank; rank > 0; --rank) { 1070 while (_buckets[rank] != bucket_end) { 1071 // Remove the first node from the current bucket 1072 int u = _buckets[rank]; 1073 _buckets[rank] = _bucket_next[u]; 1074 1075 // Search the outgoing arcs of u 1076 LargeCost rc, pi_u = _pi[u]; 1077 int last_out = _first_out[u+1]; 1078 int v, old_rank_v, new_rank_v; 1079 for (int a = _first_out[u]; a != last_out; ++a) { 1080 if (_res_cap[a] > 0) { 1081 v = _target[a]; 1082 old_rank_v = _rank[v]; 1083 1084 if (old_rank_v < rank) { 1085 1086 // Compute the new rank of node v 1087 rc = _cost[a] + pi_u - _pi[v]; 1088 if (rc < 0) { 1089 new_rank_v = rank; 1090 } else { 1091 LargeCost nrc = rc / _epsilon; 1092 new_rank_v = 0; 1093 if (nrc < LargeCost(_max_rank)) { 1094 new_rank_v = rank - 1 - static_cast<int>(nrc); 1095 } 1096 } 1097 1098 // Change the rank of node v 1099 if (new_rank_v > old_rank_v) { 1100 _rank[v] = new_rank_v; 1101 1102 // Remove v from its old bucket 1103 if (old_rank_v > 0) { 1104 if (_buckets[old_rank_v] == v) { 1105 _buckets[old_rank_v] = _bucket_next[v]; 1106 } else { 1107 int pv = _bucket_prev[v], nv = _bucket_next[v]; 1108 _bucket_next[pv] = nv; 1109 _bucket_prev[nv] = pv; 1110 } 1111 } 1112 1113 // Insert v into its new bucket 1114 int nv = _buckets[new_rank_v]; 1115 _bucket_next[v] = nv; 1116 _bucket_prev[nv] = v; 1117 _buckets[new_rank_v] = v; 1118 } 1119 } 1120 } 1121 } 1122 1123 // Refine potential of node u 1124 _pi[u] -= rank * _epsilon; 1125 } 1126 } 1127 1128 } 1129 1130 return false; 1131 } 1132 1133 // Find and cancel cycles in the admissible network and 1134 // determine topological order using DFS 1135 bool topologicalSort(IntVector &stack, int &stack_top) { 1136 const int MAX_CYCLE_CANCEL = 1; 1137 1138 BoolVector reached(_res_node_num, false); 1139 BoolVector processed(_res_node_num, false); 1140 IntVector pred(_res_node_num); 1141 for (int i = 0; i != _res_node_num; ++i) { 1142 _next_out[i] = _first_out[i]; 1143 } 1144 stack_top = -1; 1145 1146 int cycle_cnt = 0; 1147 for (int start = 0; start != _res_node_num; ++start) { 1148 if (reached[start]) continue; 1149 1150 // Start DFS search from this start node 1151 pred[start] = -1; 1152 int tip = start, v; 1153 while (true) { 1154 // Check the outgoing arcs of the current tip node 1155 reached[tip] = true; 1156 LargeCost pi_tip = _pi[tip]; 1157 int a, last_out = _first_out[tip+1]; 1158 for (a = _next_out[tip]; a != last_out; ++a) { 1159 if (_res_cap[a] > 0) { 1160 v = _target[a]; 1161 if (_cost[a] + pi_tip - _pi[v] < 0) { 1162 if (!reached[v]) { 1163 // A new node is reached 1164 reached[v] = true; 1165 pred[v] = tip; 1166 _next_out[tip] = a; 1167 tip = v; 1168 a = _next_out[tip]; 1169 last_out = _first_out[tip+1]; 1170 break; 1171 } 1172 else if (!processed[v]) { 1173 // A cycle is found 1174 ++cycle_cnt; 1175 _next_out[tip] = a; 1176 1177 // Find the minimum residual capacity along the cycle 1178 Value d, delta = _res_cap[a]; 1179 int u, delta_node = tip; 1180 for (u = tip; u != v; ) { 1181 u = pred[u]; 1182 d = _res_cap[_next_out[u]]; 1183 if (d <= delta) { 1184 delta = d; 1185 delta_node = u; 1186 } 1187 } 1188 1189 // Augment along the cycle 1190 _res_cap[a] -= delta; 1191 _res_cap[_reverse[a]] += delta; 1192 for 
(u = tip; u != v; ) { 1193 u = pred[u]; 1194 int ca = _next_out[u]; 1195 _res_cap[ca] -= delta; 1196 _res_cap[_reverse[ca]] += delta; 1197 } 1198 1199 // Check the maximum number of cycle canceling 1200 if (cycle_cnt >= MAX_CYCLE_CANCEL) { 1201 return false; 1202 } 1203 1204 // Roll back search to delta_node 1205 if (delta_node != tip) { 1206 for (u = tip; u != delta_node; u = pred[u]) { 1207 reached[u] = false; 1208 } 1209 tip = delta_node; 1210 a = _next_out[tip] + 1; 1211 last_out = _first_out[tip+1]; 1212 break; 1213 } 1214 } 1215 } 1216 } 1217 } 1218 1219 // Step back to the previous node 1220 if (a == last_out) { 1221 processed[tip] = true; 1222 stack[++stack_top] = tip; 1223 tip = pred[tip]; 1224 if (tip < 0) { 1225 // Finish DFS from the current start node 1226 break; 1227 } 1228 ++_next_out[tip]; 1229 } 1230 } 1231 1232 } 1233 1234 return (cycle_cnt == 0); 996 1235 } 997 1236 998 1237 // Global potential update heuristic 999 1238 void globalUpdate() { 1000 int bucket_end = _root + 1;1239 const int bucket_end = _root + 1; 1001 1240 1002 1241 // Initialize buckets … … 1005 1244 } 1006 1245 Value total_excess = 0; 1246 int b0 = bucket_end; 1007 1247 for (int i = 0; i != _res_node_num; ++i) { 1008 1248 if (_excess[i] < 0) { 1009 1249 _rank[i] = 0; 1010 _bucket_next[i] = _buckets[0];1011 _bucket_prev[ _buckets[0]] = i;1012 _buckets[0]= i;1250 _bucket_next[i] = b0; 1251 _bucket_prev[b0] = i; 1252 b0 = i; 1013 1253 } else { 1014 1254 total_excess += _excess[i]; … … 1017 1257 } 1018 1258 if (total_excess == 0) return; 1259 _buckets[0] = b0; 1019 1260 1020 1261 // Search the buckets … … 1038 1279 LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon; 1039 1280 int new_rank_v = old_rank_v; 1040 if (nrc < LargeCost(_max_rank)) 1041 new_rank_v = r + 1 + int(nrc); 1281 if (nrc < LargeCost(_max_rank)) { 1282 new_rank_v = r + 1 + static_cast<int>(nrc); 1283 } 1042 1284 1043 1285 // Change the rank of v … … 1051 1293 _buckets[old_rank_v] = _bucket_next[v]; 1052 1294 } else { 1053 _bucket_next[_bucket_prev[v]] = _bucket_next[v]; 1054 _bucket_prev[_bucket_next[v]] = _bucket_prev[v]; 1295 int pv = _bucket_prev[v], nv = _bucket_next[v]; 1296 _bucket_next[pv] = nv; 1297 _bucket_prev[nv] = pv; 1055 1298 } 1056 1299 } 1057 1300 1058 // Insert v to its new bucket 1059 _bucket_next[v] = _buckets[new_rank_v]; 1060 _bucket_prev[_buckets[new_rank_v]] = v; 1301 // Insert v into its new bucket 1302 int nv = _buckets[new_rank_v]; 1303 _bucket_next[v] = nv; 1304 _bucket_prev[nv] = v; 1061 1305 _buckets[new_rank_v] = v; 1062 1306 } … … 1087 1331 void startAugment(int max_length) { 1088 1332 // Paramters for heuristics 1089 const int EARLY_TERM_EPSILON_LIMIT = 1000; 1090 const double GLOBAL_UPDATE_FACTOR = 3.0; 1091 1092 const int global_update_freq = int(GLOBAL_UPDATE_FACTOR * 1333 const int PRICE_REFINEMENT_LIMIT = 2; 1334 const double GLOBAL_UPDATE_FACTOR = 1.0; 1335 const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR * 1093 1336 (_res_node_num + _sup_node_num * _sup_node_num)); 1094 int next_update_limit = global_update_freq; 1095 1337 int next_global_update_limit = global_update_skip; 1338 1339 // Perform cost scaling phases 1340 IntVector path; 1341 BoolVector path_arc(_res_arc_num, false); 1096 1342 int relabel_cnt = 0; 1097 1098 // Perform cost scaling phases 1099 std::vector<int> path; 1343 int eps_phase_cnt = 0; 1100 1344 for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ? 
1101 1345 1 : _epsilon / _alpha ) 1102 1346 { 1103 // Early termination heuristic 1104 if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) { 1105 if (earlyTermination()) break; 1347 ++eps_phase_cnt; 1348 1349 // Price refinement heuristic 1350 if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) { 1351 if (priceRefinement()) continue; 1106 1352 } 1107 1353 … … 1120 1366 1121 1367 // Find an augmenting path from the start node 1122 path.clear();1123 1368 int tip = start; 1124 while ( _excess[tip] >= 0 && int(path.size()) < max_length) {1369 while (int(path.size()) < max_length && _excess[tip] >= 0) { 1125 1370 int u; 1126 LargeCost min_red_cost, rc, pi_tip = _pi[tip]; 1371 LargeCost rc, min_red_cost = std::numeric_limits<LargeCost>::max(); 1372 LargeCost pi_tip = _pi[tip]; 1127 1373 int last_out = _first_out[tip+1]; 1128 1374 for (int a = _next_out[tip]; a != last_out; ++a) { 1129 u = _target[a]; 1130 if (_res_cap[a] > 0 && _cost[a] + pi_tip - _pi[u] < 0) { 1131 path.push_back(a); 1132 _next_out[tip] = a; 1133 tip = u; 1134 goto next_step; 1375 if (_res_cap[a] > 0) { 1376 u = _target[a]; 1377 rc = _cost[a] + pi_tip - _pi[u]; 1378 if (rc < 0) { 1379 path.push_back(a); 1380 _next_out[tip] = a; 1381 if (path_arc[a]) { 1382 goto augment; // a cycle is found, stop path search 1383 } 1384 tip = u; 1385 path_arc[a] = true; 1386 goto next_step; 1387 } 1388 else if (rc < min_red_cost) { 1389 min_red_cost = rc; 1390 } 1135 1391 } 1136 1392 } 1137 1393 1138 1394 // Relabel tip node 1139 min_red_cost = std::numeric_limits<LargeCost>::max();1140 1395 if (tip != start) { 1141 1396 int ra = _reverse[path.back()]; 1142 min_red_cost = _cost[ra] + pi_tip - _pi[_target[ra]]; 1397 min_red_cost = 1398 std::min(min_red_cost, _cost[ra] + pi_tip - _pi[_target[ra]]); 1143 1399 } 1400 last_out = _next_out[tip]; 1144 1401 for (int a = _first_out[tip]; a != last_out; ++a) { 1145 rc = _cost[a] + pi_tip - _pi[_target[a]]; 1146 if (_res_cap[a] > 0 && rc < min_red_cost) { 1147 min_red_cost = rc; 1402 if (_res_cap[a] > 0) { 1403 rc = _cost[a] + pi_tip - _pi[_target[a]]; 1404 if (rc < min_red_cost) { 1405 min_red_cost = rc; 1406 } 1148 1407 } 1149 1408 } … … 1154 1413 // Step back 1155 1414 if (tip != start) { 1156 tip = _source[path.back()]; 1415 int pa = path.back(); 1416 path_arc[pa] = false; 1417 tip = _source[pa]; 1157 1418 path.pop_back(); 1158 1419 } … … 1162 1423 1163 1424 // Augment along the found path (as much flow as possible) 1425 augment: 1164 1426 Value delta; 1165 1427 int pa, u, v = start; … … 1168 1430 u = v; 1169 1431 v = _target[pa]; 1432 path_arc[pa] = false; 1170 1433 delta = std::min(_res_cap[pa], _excess[u]); 1171 1434 _res_cap[pa] -= delta; … … 1173 1436 _excess[u] -= delta; 1174 1437 _excess[v] += delta; 1175 if (_excess[v] > 0 && _excess[v] <= delta) 1438 if (_excess[v] > 0 && _excess[v] <= delta) { 1176 1439 _active_nodes.push_back(v); 1177 } 1440 } 1441 } 1442 path.clear(); 1178 1443 1179 1444 // Global update heuristic 1180 if (relabel_cnt >= next_ update_limit) {1445 if (relabel_cnt >= next_global_update_limit) { 1181 1446 globalUpdate(); 1182 next_update_limit += global_update_freq; 1183 } 1184 } 1185 } 1447 next_global_update_limit += global_update_skip; 1448 } 1449 } 1450 1451 } 1452 1186 1453 } 1187 1454 … … 1189 1456 void startPush() { 1190 1457 // Paramters for heuristics 1191 const int EARLY_TERM_EPSILON_LIMIT = 1000;1458 const int PRICE_REFINEMENT_LIMIT = 2; 1192 1459 const double GLOBAL_UPDATE_FACTOR = 2.0; 1193 1460 1194 const int global_update_ freq = int(GLOBAL_UPDATE_FACTOR *1461 const int 
global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR * 1195 1462 (_res_node_num + _sup_node_num * _sup_node_num)); 1196 int next_update_limit = global_update_freq; 1197 1198 int relabel_cnt = 0; 1463 int next_global_update_limit = global_update_skip; 1199 1464 1200 1465 // Perform cost scaling phases 1201 1466 BoolVector hyper(_res_node_num, false); 1202 1467 LargeCostVector hyper_cost(_res_node_num); 1468 int relabel_cnt = 0; 1469 int eps_phase_cnt = 0; 1203 1470 for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ? 1204 1471 1 : _epsilon / _alpha ) 1205 1472 { 1206 // Early termination heuristic 1207 if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) { 1208 if (earlyTermination()) break; 1473 ++eps_phase_cnt; 1474 1475 // Price refinement heuristic 1476 if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) { 1477 if (priceRefinement()) continue; 1209 1478 } 1210 1479 … … 1278 1547 std::numeric_limits<LargeCost>::max(); 1279 1548 for (int a = _first_out[n]; a != last_out; ++a) { 1280 rc = _cost[a] + pi_n - _pi[_target[a]]; 1281 if (_res_cap[a] > 0 && rc < min_red_cost) { 1282 min_red_cost = rc; 1549 if (_res_cap[a] > 0) { 1550 rc = _cost[a] + pi_n - _pi[_target[a]]; 1551 if (rc < min_red_cost) { 1552 min_red_cost = rc; 1553 } 1283 1554 } 1284 1555 } … … 1298 1569 1299 1570 // Global update heuristic 1300 if (relabel_cnt >= next_ update_limit) {1571 if (relabel_cnt >= next_global_update_limit) { 1301 1572 globalUpdate(); 1302 1573 for (int u = 0; u != _res_node_num; ++u) 1303 1574 hyper[u] = false; 1304 next_ update_limit += global_update_freq;1575 next_global_update_limit += global_update_skip; 1305 1576 } 1306 1577 } -
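Although the cost_scaling.h hunk above is large, the user-visible changes are mainly the run(method, factor) signature, where the scaling factor now defaults to 16 and must be at least 2, and the clarified reset()/resetParams() semantics for multiple runs. A usage sketch under those assumptions (the graph and all numeric values are invented; totalCost() is assumed from the rest of the class):

  #include <lemon/list_graph.h>
  #include <lemon/cost_scaling.h>
  #include <iostream>

  using namespace lemon;

  int main() {
    ListDigraph g;
    ListDigraph::Node s = g.addNode();
    ListDigraph::Node t = g.addNode();
    ListDigraph::Arc a = g.addArc(s, t);

    ListDigraph::ArcMap<int> capacity(g), cost(g);
    capacity[a] = 10;
    cost[a] = 2;

    CostScaling<ListDigraph> cs(g);
    cs.upperMap(capacity).costMap(cost).stSupply(s, t, 4);

    // PARTIAL_AUGMENT is the default method; the factor must be >= 2.
    if (cs.run(CostScaling<ListDigraph>::PARTIAL_AUGMENT, 16) ==
        CostScaling<ListDigraph>::OPTIMAL) {
      std::cout << "total cost: " << cs.totalCost() << std::endl;
    }

    // Solve again with a different supply: the digraph was not modified,
    // so resetParams() is sufficient; reset() would also rebuild the
    // internal copy of the graph.
    cs.resetParams();
    cs.upperMap(capacity).costMap(cost).stSupply(s, t, 7);
    cs.run();
    return 0;
  }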
lemon/cycle_canceling.h
(r877 → r922)

   /// algorithm. By default, it is the same as \c V.
   ///
-  /// \warning Both number types must be signed and all input data must
-  /// be integer.
-  /// \warning This algorithm does not support negative costs for such
-  /// arcs that have infinite upper bound.
+  /// \warning Both \c V and \c C must be signed number types.
+  /// \warning All input data (capacities, supply values, and costs) must
+  /// be integer.
+  /// \warning This algorithm does not support negative costs for
+  /// arcs having infinite upper bound.
   ///
   /// \note For more information about the three available methods,
…
     /// \ref CycleCanceling provides three different cycle-canceling
     /// methods. By default, \ref CANCEL_AND_TIGHTEN "Cancel and Tighten"
-    /// is used, which proved to be the most efficient and the most robust
-    /// on various test inputs.
+    /// is used, which is by far the most efficient and the most robust.
     /// However, the other methods can be selected using the \ref run()
     /// function with the proper parameter.
…
     ///
     /// Using this function has the same effect as using \ref supplyMap()
-    /// with such a map in which \c k is assigned to \c s, \c -k is
+    /// with a map in which \c k is assigned to \c s, \c -k is
     /// assigned to \c t and all other nodes have zero supply value.
     ///
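A similar sketch for CycleCanceling, showing where the method parameter mentioned above goes; CANCEL_AND_TIGHTEN is already the default, so passing it is only illustrative (the example graph and values are invented):

  #include <lemon/list_graph.h>
  #include <lemon/cycle_canceling.h>
  #include <iostream>

  using namespace lemon;

  int main() {
    ListDigraph g;
    ListDigraph::Node s = g.addNode();
    ListDigraph::Node t = g.addNode();
    ListDigraph::Arc a = g.addArc(s, t);

    ListDigraph::ArcMap<int> capacity(g), cost(g);
    capacity[a] = 8;
    cost[a] = 5;

    CycleCanceling<ListDigraph> cc(g);
    cc.upperMap(capacity).costMap(cost).stSupply(s, t, 3);

    // Explicitly select the default "Cancel and Tighten" method.
    if (cc.run(CycleCanceling<ListDigraph>::CANCEL_AND_TIGHTEN) ==
        CycleCanceling<ListDigraph>::OPTIMAL) {
      std::cout << "total cost: " << cc.totalCost() << std::endl;
    }
    return 0;
  }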
lemon/euler.h
(r877 → r919)

 ///Euler tour iterator for digraphs.

-/// \ingroup graph_prop
+/// \ingroup graph_properties
 ///This iterator provides an Euler tour (Eulerian circuit) of a \e directed
 ///graph (if there exists) and it converts to the \c Arc type of the digraph.
lemon/hao_orlin.h
(r877 → r915)

 /// preflow push-relabel algorithm. Our implementation calculates
 /// the minimum cut in \f$ O(n^2\sqrt{m}) \f$ time (we use the
-/// highest-label rule), or in \f$O(nm)\f$ for unit capacities. The
-/// purpose of such algorithm is e.g. testing network reliability.
+/// highest-label rule), or in \f$O(nm)\f$ for unit capacities. A notable
+/// use of this algorithm is testing network reliability.
 ///
 /// For an undirected graph you can run just the first phase of the
…
     /// source-side (i.e. a set \f$ X\subsetneq V \f$ with
     /// \f$ source \in X \f$ and minimal outgoing capacity).
+    /// It updates the stored cut if (and only if) the newly found one
+    /// is better.
     ///
     /// \pre \ref init() must be called before using this function.
…
     /// sink-side (i.e. a set \f$ X\subsetneq V \f$ with
     /// \f$ source \notin X \f$ and minimal outgoing capacity).
+    /// It updates the stored cut if (and only if) the newly found one
+    /// is better.
     ///
     /// \pre \ref init() must be called before using this function.
…
     /// \brief Run the algorithm.
     ///
-    /// This function runs the algorithm. It finds nodes \c source and
-    /// \c target arbitrarily and then calls \ref init(), \ref calculateOut()
+    /// This function runs the algorithm. It chooses source node,
+    /// then calls \ref init(), \ref calculateOut()
     /// and \ref calculateIn().
     void run() {
…
     /// \brief Run the algorithm.
     ///
-    /// This function runs the algorithm. It uses the given \c source node,
-    /// finds a proper \c target node and then calls the \ref init(),
-    /// \ref calculateOut() and \ref calculateIn().
+    /// This function runs the algorithm. It calls \ref init(),
+    /// \ref calculateOut() and \ref calculateIn() with the given
+    /// source node.
     void run(const Node& s) {
       init(s);
…
     /// \brief Return the value of the minimum cut.
     ///
-    /// This function returns the value of the minimum cut.
+    /// This function returns the value of the best cut found by the
+    /// previously called \ref run(), \ref calculateOut() or \ref
+    /// calculateIn().
     ///
     /// \pre \ref run(), \ref calculateOut() or \ref calculateIn()
…
     /// \brief Return a minimum cut.
     ///
-    /// This function sets \c cutMap to the characteristic vector of a
-    /// minimum value cut: it will give a non-empty set \f$ X\subsetneq V \f$
-    /// with minimal outgoing capacity (i.e. \c cutMap will be \c true exactly
+    /// This function gives the best cut found by the
+    /// previously called \ref run(), \ref calculateOut() or \ref
+    /// calculateIn().
+    ///
+    /// It sets \c cutMap to the characteristic vector of the found
+    /// minimum value cut - a non-empty set \f$ X\subsetneq V \f$
+    /// of minimum outgoing capacity (i.e. \c cutMap will be \c true exactly
     /// for the nodes of \f$ X \f$).
     ///
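A short sketch of the init()/calculateOut()/calculateIn()/minCutValue()/minCutMap() workflow that the revised comments describe, on an invented three-node example (the capacities are arbitrary):

  #include <lemon/list_graph.h>
  #include <lemon/hao_orlin.h>
  #include <iostream>

  using namespace lemon;

  int main() {
    ListDigraph g;
    ListDigraph::Node a = g.addNode();
    ListDigraph::Node b = g.addNode();
    ListDigraph::Node c = g.addNode();
    ListDigraph::ArcMap<int> cap(g);
    cap[g.addArc(a, b)] = 1;
    cap[g.addArc(b, c)] = 2;
    cap[g.addArc(c, a)] = 3;

    HaoOrlin<ListDigraph, ListDigraph::ArcMap<int> > ho(g, cap);
    ho.init();           // start from an arbitrarily chosen source node
    ho.calculateOut();   // best cut with the source on the "out" side
    ho.calculateIn();    // only replaces the stored cut if it improves it

    ListDigraph::NodeMap<bool> cut(g);
    std::cout << "min cut value: " << ho.minCutValue() << std::endl;
    ho.minCutMap(cut);   // characteristic vector of the best cut found
    return 0;
  }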
lemon/kruskal.h
(r584 → r921)

 ///\file
 ///\brief Kruskal's algorithm to compute a minimum cost spanning tree
-///
-///Kruskal's algorithm to compute a minimum cost spanning tree.
-///

 namespace lemon {
lemon/lemon.pc.in
(r658 → r981)

-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=@CMAKE_INSTALL_PREFIX@/bin
+libdir=@CMAKE_INSTALL_PREFIX@/lib
+includedir=@CMAKE_INSTALL_PREFIX@/include

-Name: @PACKAGE_NAME@
+Name: @PROJECT_NAME@
 Description: Library for Efficient Modeling and Optimization in Networks
-Version: @PACKAGE_VERSION@
+Version: @PROJECT_VERSION@
 Libs: -L${libdir} -lemon @GLPK_LIBS@ @CPLEX_LIBS@ @SOPLEX_LIBS@ @CLP_LIBS@ @CBC_LIBS@
 Cflags: -I${includedir}
lemon/network_simplex.h
r889 r984 48 48 /// flow problem. 49 49 /// 50 /// In general, %NetworkSimplex is the fastest implementation available51 /// i n LEMON for this problem.52 /// Moreover, it supports both directions of the supply/demand inequality53 /// constraints. For more information, see \ref SupplyType.50 /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest 51 /// implementations available in LEMON for this problem. 52 /// Furthermore, this class supports both directions of the supply/demand 53 /// inequality constraints. For more information, see \ref SupplyType. 54 54 /// 55 55 /// Most of the parameters of the problem (except for the digraph) … … 64 64 /// algorithm. By default, it is the same as \c V. 65 65 /// 66 /// \warning Both number types must be signed and all input data must 66 /// \warning Both \c V and \c C must be signed number types. 67 /// \warning All input data (capacities, supply values, and costs) must 67 68 /// be integer. 68 69 /// … … 122 123 /// the \ref run() function. 123 124 /// 124 /// \ref NetworkSimplex provides five different pivot rule125 /// implementations that significantly affectthe running time125 /// \ref NetworkSimplex provides five different implementations for 126 /// the pivot strategy that significantly affects the running time 126 127 /// of the algorithm. 127 /// By default, \ref BLOCK_SEARCH "Block Search" is used, which 128 /// proved to be the most efficient and the most robust on various 129 /// test inputs. 130 /// However, another pivot rule can be selected using the \ref run() 131 /// function with the proper parameter. 128 /// According to experimental tests conducted on various problem 129 /// instances, \ref BLOCK_SEARCH "Block Search" and 130 /// \ref ALTERING_LIST "Altering Candidate List" rules turned out 131 /// to be the most efficient. 132 /// Since \ref BLOCK_SEARCH "Block Search" is a simpler strategy that 133 /// seemed to be slightly more robust, it is used by default. 134 /// However, another pivot rule can easily be selected using the 135 /// \ref run() function with the proper parameter. 132 136 enum PivotRule { 133 137 … … 155 159 /// The \e Altering \e Candidate \e List pivot rule. 156 160 /// It is a modified version of the Candidate List method. 157 /// It keeps only the severalbest eligible arcs from the former161 /// It keeps only a few of the best eligible arcs from the former 158 162 /// candidate list and extends this list in every iteration. 
159 163 ALTERING_LIST … … 167 171 typedef std::vector<Value> ValueVector; 168 172 typedef std::vector<Cost> CostVector; 169 typedef std::vector<char> BoolVector; 170 // Note: vector<char> is used instead of vector<bool> for efficiency reasons 173 typedef std::vector<signed char> CharVector; 174 // Note: vector<signed char> is used instead of vector<ArcState> and 175 // vector<ArcDirection> for efficiency reasons 171 176 172 177 // State constants for arcs … … 177 182 }; 178 183 179 typedef std::vector<signed char> StateVector; 180 // Note: vector<signed char> is used instead of vector<ArcState> for 181 // efficiency reasons 184 // Direction constants for tree arcs 185 enum ArcDirection { 186 DIR_DOWN = -1, 187 DIR_UP = 1 188 }; 182 189 183 190 private: … … 218 225 IntVector _succ_num; 219 226 IntVector _last_succ; 227 CharVector _pred_dir; 228 CharVector _state; 220 229 IntVector _dirty_revs; 221 BoolVector _forward;222 StateVector _state;223 230 int _root; 224 231 225 232 // Temporary data used in the current pivot iteration 226 233 int in_arc, join, u_in, v_in, u_out, v_out; 227 int first, second, right, last;228 int stem, par_stem, new_stem;229 234 Value delta; 230 235 … … 251 256 const IntVector &_target; 252 257 const CostVector &_cost; 253 const StateVector &_state;258 const CharVector &_state; 254 259 const CostVector &_pi; 255 260 int &_in_arc; … … 303 308 const IntVector &_target; 304 309 const CostVector &_cost; 305 const StateVector &_state;310 const CharVector &_state; 306 311 const CostVector &_pi; 307 312 int &_in_arc; … … 342 347 const IntVector &_target; 343 348 const CostVector &_cost; 344 const StateVector &_state;349 const CharVector &_state; 345 350 const CostVector &_pi; 346 351 int &_in_arc; … … 415 420 const IntVector &_target; 416 421 const CostVector &_cost; 417 const StateVector &_state;422 const CharVector &_state; 418 423 const CostVector &_pi; 419 424 int &_in_arc; … … 518 523 const IntVector &_target; 519 524 const CostVector &_cost; 520 const StateVector &_state;525 const CharVector &_state; 521 526 const CostVector &_pi; 522 527 int &_in_arc; … … 537 542 SortFunc(const CostVector &map) : _map(map) {} 538 543 bool operator()(int left, int right) { 539 return _map[left] >_map[right];544 return _map[left] < _map[right]; 540 545 } 541 546 }; … … 555 560 const double BLOCK_SIZE_FACTOR = 1.0; 556 561 const int MIN_BLOCK_SIZE = 10; 557 const double HEAD_LENGTH_FACTOR = 0. 
1;562 const double HEAD_LENGTH_FACTOR = 0.01; 558 563 const int MIN_HEAD_LENGTH = 3; 559 564 … … 571 576 // Check the current candidate list 572 577 int e; 578 Cost c; 573 579 for (int i = 0; i != _curr_length; ++i) { 574 580 e = _candidates[i]; 575 _cand_cost[e] = _state[e] * 576 (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); 577 if (_cand_cost[e] >= 0) { 581 c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); 582 if (c < 0) { 583 _cand_cost[e] = c; 584 } else { 578 585 _candidates[i--] = _candidates[--_curr_length]; 579 586 } … … 585 592 586 593 for (e = _next_arc; e != _search_arc_num; ++e) { 587 _cand_cost[e] = _state[e] *588 (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);589 if (_cand_cost[e] < 0) {594 c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); 595 if (c < 0) { 596 _cand_cost[e] = c; 590 597 _candidates[_curr_length++] = e; 591 598 } … … 597 604 } 598 605 for (e = 0; e != _next_arc; ++e) { 599 _cand_cost[e] = _state[e] *600 (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);601 if (_cand_cost[e] < 0) {606 c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); 607 if (c < 0) { 608 _cand_cost[e] = c; 602 609 _candidates[_curr_length++] = e; 603 610 } … … 612 619 search_end: 613 620 614 // Make heap of the candidate list (approximating a partial sort) 615 make_heap( _candidates.begin(), _candidates.begin() + _curr_length, 616 _sort_func ); 617 618 // Pop the first element of the heap 621 // Perform partial sort operation on the candidate list 622 int new_length = std::min(_head_length + 1, _curr_length); 623 std::partial_sort(_candidates.begin(), _candidates.begin() + new_length, 624 _candidates.begin() + _curr_length, _sort_func); 625 626 // Select the entering arc and remove it from the list 619 627 _in_arc = _candidates[0]; 620 628 _next_arc = e; 621 pop_heap( _candidates.begin(), _candidates.begin() + _curr_length, 622 _sort_func ); 623 _curr_length = std::min(_head_length, _curr_length - 1); 629 _candidates[0] = _candidates[new_length - 1]; 630 _curr_length = new_length - 1; 624 631 return true; 625 632 } … … 634 641 /// 635 642 /// \param graph The digraph the algorithm runs on. 636 /// \param arc_mixing Indicate if the arcs have tobe stored in a643 /// \param arc_mixing Indicate if the arcs will be stored in a 637 644 /// mixed order in the internal data structure. 638 /// In special cases, it could lead to better overall performance, 639 /// but it is usually slower. Therefore it is disabled by default. 640 NetworkSimplex(const GR& graph, bool arc_mixing = false) : 645 /// In general, it leads to similar performance as using the original 646 /// arc order, but it makes the algorithm more robust and in special 647 /// cases, even significantly faster. Therefore, it is enabled by default. 648 NetworkSimplex(const GR& graph, bool arc_mixing = true) : 641 649 _graph(graph), _node_id(graph), _arc_id(graph), 642 650 _arc_mixing(arc_mixing), … … 731 739 /// 732 740 /// \return <tt>(*this)</tt> 741 /// 742 /// \sa supplyType() 733 743 template<typename SupplyMap> 734 744 NetworkSimplex& supplyMap(const SupplyMap& map) { … … 747 757 /// 748 758 /// Using this function has the same effect as using \ref supplyMap() 749 /// with sucha map in which \c k is assigned to \c s, \c -k is759 /// with a map in which \c k is assigned to \c s, \c -k is 750 760 /// assigned to \c t and all other nodes have zero supply value. 
751 761 /// … … 914 924 _parent.resize(all_node_num); 915 925 _pred.resize(all_node_num); 916 _ forward.resize(all_node_num);926 _pred_dir.resize(all_node_num); 917 927 _thread.resize(all_node_num); 918 928 _rev_thread.resize(all_node_num); … … 928 938 if (_arc_mixing) { 929 939 // Store the arcs in a mixed order 930 int k = std::max(int(std::sqrt(double(_arc_num))), 10);940 const int skip = std::max(_arc_num / _node_num, 3); 931 941 int i = 0, j = 0; 932 942 for (ArcIt a(_graph); a != INVALID; ++a) { … … 934 944 _source[i] = _node_id[_graph.source(a)]; 935 945 _target[i] = _node_id[_graph.target(a)]; 936 if ((i += k) >= _arc_num) i = ++j;946 if ((i += skip) >= _arc_num) i = ++j; 937 947 } 938 948 } else { … … 1117 1127 _state[e] = STATE_TREE; 1118 1128 if (_supply[u] >= 0) { 1119 _ forward[u] = true;1129 _pred_dir[u] = DIR_UP; 1120 1130 _pi[u] = 0; 1121 1131 _source[e] = u; … … 1124 1134 _cost[e] = 0; 1125 1135 } else { 1126 _ forward[u] = false;1136 _pred_dir[u] = DIR_DOWN; 1127 1137 _pi[u] = ART_COST; 1128 1138 _source[e] = _root; … … 1144 1154 _last_succ[u] = u; 1145 1155 if (_supply[u] >= 0) { 1146 _ forward[u] = true;1156 _pred_dir[u] = DIR_UP; 1147 1157 _pi[u] = 0; 1148 1158 _pred[u] = e; … … 1154 1164 _state[e] = STATE_TREE; 1155 1165 } else { 1156 _ forward[u] = false;1166 _pred_dir[u] = DIR_DOWN; 1157 1167 _pi[u] = ART_COST; 1158 1168 _pred[u] = f; … … 1185 1195 _last_succ[u] = u; 1186 1196 if (_supply[u] <= 0) { 1187 _ forward[u] = false;1197 _pred_dir[u] = DIR_DOWN; 1188 1198 _pi[u] = 0; 1189 1199 _pred[u] = e; … … 1195 1205 _state[e] = STATE_TREE; 1196 1206 } else { 1197 _ forward[u] = true;1207 _pred_dir[u] = DIR_UP; 1198 1208 _pi[u] = -ART_COST; 1199 1209 _pred[u] = f; … … 1238 1248 // Initialize first and second nodes according to the direction 1239 1249 // of the cycle 1250 int first, second; 1240 1251 if (_state[in_arc] == STATE_LOWER) { 1241 1252 first = _source[in_arc]; … … 1247 1258 delta = _cap[in_arc]; 1248 1259 int result = 0; 1249 Value d;1260 Value c, d; 1250 1261 int e; 1251 1262 1252 // Search the cycle along the path form the first node to the root1263 // Search the cycle form the first node to the join node 1253 1264 for (int u = first; u != join; u = _parent[u]) { 1254 1265 e = _pred[u]; 1255 d = _forward[u] ? 1256 _flow[e] : (_cap[e] >= MAX ? INF : _cap[e] - _flow[e]); 1266 d = _flow[e]; 1267 if (_pred_dir[u] == DIR_DOWN) { 1268 c = _cap[e]; 1269 d = c >= MAX ? INF : c - d; 1270 } 1257 1271 if (d < delta) { 1258 1272 delta = d; … … 1261 1275 } 1262 1276 } 1263 // Search the cycle along the path form the second node to the root 1277 1278 // Search the cycle form the second node to the join node 1264 1279 for (int u = second; u != join; u = _parent[u]) { 1265 1280 e = _pred[u]; 1266 d = _forward[u] ? 1267 (_cap[e] >= MAX ? INF : _cap[e] - _flow[e]) : _flow[e]; 1281 d = _flow[e]; 1282 if (_pred_dir[u] == DIR_UP) { 1283 c = _cap[e]; 1284 d = c >= MAX ? INF : c - d; 1285 } 1268 1286 if (d <= delta) { 1269 1287 delta = d; … … 1290 1308 _flow[in_arc] += val; 1291 1309 for (int u = _source[in_arc]; u != join; u = _parent[u]) { 1292 _flow[_pred[u]] += _forward[u] ? -val :val;1310 _flow[_pred[u]] -= _pred_dir[u] * val; 1293 1311 } 1294 1312 for (int u = _target[in_arc]; u != join; u = _parent[u]) { 1295 _flow[_pred[u]] += _ forward[u] ? 
val : -val;1313 _flow[_pred[u]] += _pred_dir[u] * val; 1296 1314 } 1297 1315 } … … 1308 1326 // Update the tree structure 1309 1327 void updateTreeStructure() { 1310 int u, w;1311 1328 int old_rev_thread = _rev_thread[u_out]; 1312 1329 int old_succ_num = _succ_num[u_out]; … … 1314 1331 v_out = _parent[u_out]; 1315 1332 1316 u = _last_succ[u_in]; // the last successor of u_in 1317 right = _thread[u]; // the node after it 1318 1319 // Handle the case when old_rev_thread equals to v_in 1320 // (it also means that join and v_out coincide) 1321 if (old_rev_thread == v_in) { 1322 last = _thread[_last_succ[u_out]]; 1333 // Check if u_in and u_out coincide 1334 if (u_in == u_out) { 1335 // Update _parent, _pred, _pred_dir 1336 _parent[u_in] = v_in; 1337 _pred[u_in] = in_arc; 1338 _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN; 1339 1340 // Update _thread and _rev_thread 1341 if (_thread[v_in] != u_out) { 1342 int after = _thread[old_last_succ]; 1343 _thread[old_rev_thread] = after; 1344 _rev_thread[after] = old_rev_thread; 1345 after = _thread[v_in]; 1346 _thread[v_in] = u_out; 1347 _rev_thread[u_out] = v_in; 1348 _thread[old_last_succ] = after; 1349 _rev_thread[after] = old_last_succ; 1350 } 1323 1351 } else { 1324 last = _thread[v_in]; 1325 } 1326 1327 // Update _thread and _parent along the stem nodes (i.e. the nodes 1328 // between u_in and u_out, whose parent have to be changed) 1329 _thread[v_in] = stem = u_in; 1330 _dirty_revs.clear(); 1331 _dirty_revs.push_back(v_in); 1332 par_stem = v_in; 1333 while (stem != u_out) { 1334 // Insert the next stem node into the thread list 1335 new_stem = _parent[stem]; 1336 _thread[u] = new_stem; 1337 _dirty_revs.push_back(u); 1338 1339 // Remove the subtree of stem from the thread list 1340 w = _rev_thread[stem]; 1341 _thread[w] = right; 1342 _rev_thread[right] = w; 1343 1344 // Change the parent node and shift stem nodes 1345 _parent[stem] = par_stem; 1346 par_stem = stem; 1347 stem = new_stem; 1348 1349 // Update u and right 1350 u = _last_succ[stem] == _last_succ[par_stem] ? 
1351 _rev_thread[par_stem] : _last_succ[stem]; 1352 right = _thread[u]; 1353 } 1354 _parent[u_out] = par_stem; 1355 _thread[u] = last; 1356 _rev_thread[last] = u; 1357 _last_succ[u_out] = u; 1358 1359 // Remove the subtree of u_out from the thread list except for 1360 // the case when old_rev_thread equals to v_in 1361 // (it also means that join and v_out coincide) 1362 if (old_rev_thread != v_in) { 1363 _thread[old_rev_thread] = right; 1364 _rev_thread[right] = old_rev_thread; 1365 } 1366 1367 // Update _rev_thread using the new _thread values 1368 for (int i = 0; i != int(_dirty_revs.size()); ++i) { 1369 u = _dirty_revs[i]; 1370 _rev_thread[_thread[u]] = u; 1371 } 1372 1373 // Update _pred, _forward, _last_succ and _succ_num for the 1374 // stem nodes from u_out to u_in 1375 int tmp_sc = 0, tmp_ls = _last_succ[u_out]; 1376 u = u_out; 1377 while (u != u_in) { 1378 w = _parent[u]; 1379 _pred[u] = _pred[w]; 1380 _forward[u] = !_forward[w]; 1381 tmp_sc += _succ_num[u] - _succ_num[w]; 1382 _succ_num[u] = tmp_sc; 1383 _last_succ[w] = tmp_ls; 1384 u = w; 1385 } 1386 _pred[u_in] = in_arc; 1387 _forward[u_in] = (u_in == _source[in_arc]); 1388 _succ_num[u_in] = old_succ_num; 1389 1390 // Set limits for updating _last_succ form v_in and v_out 1391 // towards the root 1392 int up_limit_in = -1; 1393 int up_limit_out = -1; 1394 if (_last_succ[join] == v_in) { 1395 up_limit_out = join; 1396 } else { 1397 up_limit_in = join; 1352 // Handle the case when old_rev_thread equals to v_in 1353 // (it also means that join and v_out coincide) 1354 int thread_continue = old_rev_thread == v_in ? 1355 _thread[old_last_succ] : _thread[v_in]; 1356 1357 // Update _thread and _parent along the stem nodes (i.e. the nodes 1358 // between u_in and u_out, whose parent have to be changed) 1359 int stem = u_in; // the current stem node 1360 int par_stem = v_in; // the new parent of stem 1361 int next_stem; // the next stem node 1362 int last = _last_succ[u_in]; // the last successor of stem 1363 int before, after = _thread[last]; 1364 _thread[v_in] = u_in; 1365 _dirty_revs.clear(); 1366 _dirty_revs.push_back(v_in); 1367 while (stem != u_out) { 1368 // Insert the next stem node into the thread list 1369 next_stem = _parent[stem]; 1370 _thread[last] = next_stem; 1371 _dirty_revs.push_back(last); 1372 1373 // Remove the subtree of stem from the thread list 1374 before = _rev_thread[stem]; 1375 _thread[before] = after; 1376 _rev_thread[after] = before; 1377 1378 // Change the parent node and shift stem nodes 1379 _parent[stem] = par_stem; 1380 par_stem = stem; 1381 stem = next_stem; 1382 1383 // Update last and after 1384 last = _last_succ[stem] == _last_succ[par_stem] ? 
1385 _rev_thread[par_stem] : _last_succ[stem]; 1386 after = _thread[last]; 1387 } 1388 _parent[u_out] = par_stem; 1389 _thread[last] = thread_continue; 1390 _rev_thread[thread_continue] = last; 1391 _last_succ[u_out] = last; 1392 1393 // Remove the subtree of u_out from the thread list except for 1394 // the case when old_rev_thread equals to v_in 1395 if (old_rev_thread != v_in) { 1396 _thread[old_rev_thread] = after; 1397 _rev_thread[after] = old_rev_thread; 1398 } 1399 1400 // Update _rev_thread using the new _thread values 1401 for (int i = 0; i != int(_dirty_revs.size()); ++i) { 1402 int u = _dirty_revs[i]; 1403 _rev_thread[_thread[u]] = u; 1404 } 1405 1406 // Update _pred, _pred_dir, _last_succ and _succ_num for the 1407 // stem nodes from u_out to u_in 1408 int tmp_sc = 0, tmp_ls = _last_succ[u_out]; 1409 for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) { 1410 _pred[u] = _pred[p]; 1411 _pred_dir[u] = -_pred_dir[p]; 1412 tmp_sc += _succ_num[u] - _succ_num[p]; 1413 _succ_num[u] = tmp_sc; 1414 _last_succ[p] = tmp_ls; 1415 } 1416 _pred[u_in] = in_arc; 1417 _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN; 1418 _succ_num[u_in] = old_succ_num; 1398 1419 } 1399 1420 1400 1421 // Update _last_succ from v_in towards the root 1401 for (u = v_in; u != up_limit_in && _last_succ[u] == v_in; 1402 u = _parent[u]) { 1403 _last_succ[u] = _last_succ[u_out]; 1404 } 1422 int up_limit_out = _last_succ[join] == v_in ? join : -1; 1423 int last_succ_out = _last_succ[u_out]; 1424 for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) { 1425 _last_succ[u] = last_succ_out; 1426 } 1427 1405 1428 // Update _last_succ from v_out towards the root 1406 1429 if (join != old_rev_thread && v_in != old_rev_thread) { 1407 for ( u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;1430 for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; 1408 1431 u = _parent[u]) { 1409 1432 _last_succ[u] = old_rev_thread; 1410 1433 } 1411 } else { 1412 for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; 1434 } 1435 else if (last_succ_out != old_last_succ) { 1436 for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; 1413 1437 u = _parent[u]) { 1414 _last_succ[u] = _last_succ[u_out];1438 _last_succ[u] = last_succ_out; 1415 1439 } 1416 1440 } 1417 1441 1418 1442 // Update _succ_num from v_in to join 1419 for ( u = v_in; u != join; u = _parent[u]) {1443 for (int u = v_in; u != join; u = _parent[u]) { 1420 1444 _succ_num[u] += old_succ_num; 1421 1445 } 1422 1446 // Update _succ_num from v_out to join 1423 for ( u = v_out; u != join; u = _parent[u]) {1447 for (int u = v_out; u != join; u = _parent[u]) { 1424 1448 _succ_num[u] -= old_succ_num; 1425 1449 } 1426 1450 } 1427 1451 1428 // Update potentials 1452 // Update potentials in the subtree that has been moved 1429 1453 void updatePotential() { 1430 Cost sigma = _forward[u_in] ? 1431 _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] : 1432 _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]]; 1433 // Update potentials in the subtree, which has been moved 1454 Cost sigma = _pi[v_in] - _pi[u_in] - 1455 _pred_dir[u_in] * _cost[in_arc]; 1434 1456 int end = _thread[_last_succ[u_in]]; 1435 1457 for (int u = u_in; u != end; u = _thread[u]) { -
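The interface documented in the hunks above (supply maps, the arc-mixing default, and pivot-rule selection through run()) is typically driven as in the following sketch. It is not part of the changeset; the three-node digraph, the supply value, and the explicit choice of the Altering Candidate List rule are assumptions made only for illustration.

#include <lemon/list_graph.h>
#include <lemon/network_simplex.h>
#include <iostream>

using namespace lemon;

int main() {
  // Assumed toy instance: a single arc from s to t.
  ListDigraph g;
  ListDigraph::Node s = g.addNode();
  ListDigraph::Node t = g.addNode();
  ListDigraph::Arc  a = g.addArc(s, t);

  ListDigraph::ArcMap<int> capacity(g), cost(g);
  capacity[a] = 10;
  cost[a] = 3;

  // Arc mixing is now enabled by default (second constructor argument).
  NetworkSimplex<ListDigraph> ns(g);
  ns.upperMap(capacity).costMap(cost).stSupply(s, t, 5);

  // BLOCK_SEARCH remains the default pivot rule; another rule can be
  // passed to run(), e.g. the Altering Candidate List rule.
  typedef NetworkSimplex<ListDigraph> NS;
  if (ns.run(NS::ALTERING_LIST) == NS::OPTIMAL) {
    std::cout << "total cost: " << ns.totalCost() << std::endl;
  }
  return 0;
}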
lemon/path.h
r998 r1000
44 44      ///
45 45      /// In a sense, the path can be treated as a list of arcs. The
46         /// lemon path type stores just this list. As a consequence, it
46         /// LEMON path type stores just this list. As a consequence, it
47 47      /// cannot enumerate the nodes of the path and the source node of
48 48      /// a zero length path is undefined.
… …
149 149    void clear() { head.clear(); tail.clear(); }
150 150
151        /// \brief The n th arc.
151        /// \brief The n-th arc.
152 152    ///
153 153    /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
… …
157 157    }
158 158
159        /// \brief Initialize arc iterator to point to the n th arc
159        /// \brief Initialize arc iterator to point to the n-th arc
160 160    ///
161 161    /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
… …
245 245    ///
246 246    /// In a sense, the path can be treated as a list of arcs. The
247        /// lemon path type stores just this list. As a consequence it
247        /// LEMON path type stores just this list. As a consequence it
248 248    /// cannot enumerate the nodes in the path and the zero length paths
249 249    /// cannot store the source.
… …
354 354    void clear() { data.clear(); }
355 355
356        /// \brief The n th arc.
356        /// \brief The n-th arc.
357 357    ///
358 358    /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
… …
361 361    }
362 362
363        /// \brief Initializes arc iterator to point to the n th arc.
363        /// \brief Initializes arc iterator to point to the n-th arc.
364 364    ArcIt nthIt(int n) const {
365 365    return ArcIt(*this, n);
… …
422 422    ///
423 423    /// In a sense, the path can be treated as a list of arcs. The
424        /// lemon path type stores just this list. As a consequence it
424        /// LEMON path type stores just this list. As a consequence it
425 425    /// cannot enumerate the nodes in the path and the zero length paths
426 426    /// cannot store the source.
… …
544 544    };
545 545
546        /// \brief The n th arc.
547        ///
548        /// This function looks for the n th arc in O(n) time.
546        /// \brief The n-th arc.
547        ///
548        /// This function looks for the n-th arc in O(n) time.
549 549    /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
550 550    const Arc& nth(int n) const {
… …
556 556    }
557 557
558        /// \brief Initializes arc iterator to point to the n th arc.
558        /// \brief Initializes arc iterator to point to the n-th arc.
559 559    ArcIt nthIt(int n) const {
560 560    Node *node = first;
… …
775 775    ///
776 776    /// In a sense, the path can be treated as a list of arcs. The
777        /// lemon path type stores just this list. As a consequence it
777        /// LEMON path type stores just this list. As a consequence it
778 778    /// cannot enumerate the nodes in the path and the source node of
779 779    /// a zero length path is undefined.
… …
884 884    };
885 885
886        /// \brief The n th arc.
886        /// \brief The n-th arc.
887 887    ///
888 888    /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
… …
891 891    }
892 892
893        /// \brief The arc iterator pointing to the n th arc.
893        /// \brief The arc iterator pointing to the n-th arc.
894 894    ArcIt nthIt(int n) const {
895 895    return ArcIt(*this, n);
… …
1095 1095  ///
1096 1096  /// In a sense, the path can be treated as a list of arcs. The
1097       /// lemon path type stores only this list. As a consequence, it
1097       /// LEMON path type stores only this list. As a consequence, it
1098 1098  /// cannot enumerate the nodes in the path and the zero length paths
1099 1099  /// cannot have a source node.
-
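The n-th arc access that these documentation fixes refer to can be exercised as in the following sketch. It is not taken from the changeset; the two-arc path built here is an assumed example.

#include <lemon/list_graph.h>
#include <lemon/path.h>
#include <iostream>

using namespace lemon;

int main() {
  // Assumed toy digraph with a two-arc path a -> b -> c.
  ListDigraph g;
  ListDigraph::Node a = g.addNode(), b = g.addNode(), c = g.addNode();
  ListDigraph::Arc ab = g.addArc(a, b), bc = g.addArc(b, c);

  Path<ListDigraph> p;
  p.addBack(ab);
  p.addBack(bc);

  // nth(n) returns the n-th arc, nthIt(n) an iterator pointing to it;
  // n must lie in the [0..length()-1] range.
  ListDigraph::Arc second = p.nth(1);
  std::cout << "arcs on the path: " << p.length() << std::endl;
  for (Path<ListDigraph>::ArcIt it = p.nthIt(0); it != INVALID; ++it) {
    std::cout << g.id(g.source(it)) << " -> " << g.id(g.target(it))
              << std::endl;
  }
  return (second == bc) ? 0 : 1;
}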
test/CMakeLists.txt
r998 r1000
38 38  maps_test
39 39  matching_test
40     max_cardinality_search_test
41     max_clique_test
40 42  min_cost_arborescence_test
41 43  min_cost_flow_test
42 44  min_mean_cycle_test
45     nagamochi_ibaraki_test
43 46  path_test
44 47  planarity_test
-
test/graph_copy_test.cc
r893 r894
19 19    #include <lemon/smart_graph.h>
20 20    #include <lemon/list_graph.h>
21       #include <lemon/static_graph.h>
21 22    #include <lemon/lgf_reader.h>
22 23    #include <lemon/error.h>
… …
27 28    using namespace lemon;
28 29
30       template <typename GR>
29 31    void digraph_copy_test() {
30 32    const int nn = 10;
… …
52 54    }
53 55    }
54 56
55 57    // Test digraph copy
56       ListDigraph to;
57       ListDigraph::NodeMap<int> tnm(to);
58       ListDigraph::ArcMap<int> tam(to);
59       ListDigraph::Node tn;
60       ListDigraph::Arc ta;
61
62       SmartDigraph::NodeMap<ListDigraph::Node> nr(from);
63       SmartDigraph::ArcMap<ListDigraph::Arc> er(from);
64
65       ListDigraph::NodeMap<SmartDigraph::Node> ncr(to);
66       ListDigraph::ArcMap<SmartDigraph::Arc> ecr(to);
58       GR to;
59       typename GR::template NodeMap<int> tnm(to);
60       typename GR::template ArcMap<int> tam(to);
61       typename GR::Node tn;
62       typename GR::Arc ta;
63
64       SmartDigraph::NodeMap<typename GR::Node> nr(from);
65       SmartDigraph::ArcMap<typename GR::Arc> er(from);
66
67       typename GR::template NodeMap<SmartDigraph::Node> ncr(to);
68       typename GR::template ArcMap<SmartDigraph::Arc> ecr(to);
67 69
68 70    digraphCopy(from, to).
… …
87 89    }
88 90
89       for (ListDigraph::NodeIt it(to); it != INVALID; ++it) {
91       for (typename GR::NodeIt it(to); it != INVALID; ++it) {
90 92    check(nr[ncr[it]] == it, "Wrong copy.");
91 93    }
92 94
93       for (ListDigraph::ArcIt it(to); it != INVALID; ++it) {
95       for (typename GR::ArcIt it(to); it != INVALID; ++it) {
94 96    check(er[ecr[it]] == it, "Wrong copy.");
95 97    }
… …
104 106  }
105 107
108      template <typename GR>
106 109  void graph_copy_test() {
107 110  const int nn = 10;
… …
136 139
137 140  // Test graph copy
138      ListGraph to;
139      ListGraph::NodeMap<int> tnm(to);
140      ListGraph::ArcMap<int> tam(to);
141      ListGraph::EdgeMap<int> tem(to);
142      ListGraph::Node tn;
143      ListGraph::Arc ta;
144      ListGraph::Edge te;
145
146      SmartGraph::NodeMap<ListGraph::Node> nr(from);
147      SmartGraph::ArcMap<ListGraph::Arc> ar(from);
148      SmartGraph::EdgeMap<ListGraph::Edge> er(from);
149
150      ListGraph::NodeMap<SmartGraph::Node> ncr(to);
151      ListGraph::ArcMap<SmartGraph::Arc> acr(to);
152      ListGraph::EdgeMap<SmartGraph::Edge> ecr(to);
141      GR to;
142      typename GR::template NodeMap<int> tnm(to);
143      typename GR::template ArcMap<int> tam(to);
144      typename GR::template EdgeMap<int> tem(to);
145      typename GR::Node tn;
146      typename GR::Arc ta;
147      typename GR::Edge te;
148
149      SmartGraph::NodeMap<typename GR::Node> nr(from);
150      SmartGraph::ArcMap<typename GR::Arc> ar(from);
151      SmartGraph::EdgeMap<typename GR::Edge> er(from);
152
153      typename GR::template NodeMap<SmartGraph::Node> ncr(to);
154      typename GR::template ArcMap<SmartGraph::Arc> acr(to);
155      typename GR::template EdgeMap<SmartGraph::Edge> ecr(to);
153 156
154 157  graphCopy(from, to).
… …
185 188  }
186 189
187      for (ListGraph::NodeIt it(to); it != INVALID; ++it) {
190      for (typename GR::NodeIt it(to); it != INVALID; ++it) {
188 191  check(nr[ncr[it]] == it, "Wrong copy.");
189 192  }
190 193
191      for (ListGraph::ArcIt it(to); it != INVALID; ++it) {
194      for (typename GR::ArcIt it(to); it != INVALID; ++it) {
192 195  check(ar[acr[it]] == it, "Wrong copy.");
193 196  }
194      for (ListGraph::EdgeIt it(to); it != INVALID; ++it) {
197      for (typename GR::EdgeIt it(to); it != INVALID; ++it) {
195 198  check(er[ecr[it]] == it, "Wrong copy.");
196 199  }
… …
209 212
210 213  int main() {
211      digraph_copy_test();
212      graph_copy_test();
214      digraph_copy_test<SmartDigraph>();
215      digraph_copy_test<ListDigraph>();
216      digraph_copy_test<StaticDigraph>();
217      graph_copy_test<SmartGraph>();
218      graph_copy_test<ListGraph>();
213 219
214 220  return 0;
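The templated tests above now cover StaticDigraph as a copy target as well. The following is a hedged sketch of what such a copy can look like in user code; the two-node source digraph and the single cost map are assumptions made for illustration.

#include <lemon/smart_graph.h>
#include <lemon/static_graph.h>
#include <lemon/core.h>

using namespace lemon;

int main() {
  // Assumed source digraph with one arc and an attached cost map.
  SmartDigraph from;
  SmartDigraph::Node u = from.addNode();
  SmartDigraph::Node v = from.addNode();
  SmartDigraph::Arc  e = from.addArc(u, v);
  SmartDigraph::ArcMap<int> cost(from);
  cost[e] = 42;

  // StaticDigraph can now serve as the target of digraphCopy() as well;
  // its maps are attached before the copy builds the graph, as in the test.
  StaticDigraph to;
  StaticDigraph::ArcMap<int> tcost(to);
  SmartDigraph::NodeMap<StaticDigraph::Node> nodeRef(from);

  digraphCopy(from, to).
    arcMap(cost, tcost).
    nodeRef(nodeRef).
    run();

  // Verify that the arc data arrived in the copy.
  int sum = 0;
  for (StaticDigraph::ArcIt a(to); a != INVALID; ++a) sum += tcost[a];
  return (sum == 42) ? 0 : 1;
}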