Changes in / [1000:404b98971e1f:999:00f8d9f9920d] in lemon-main
Files: 17 added, 8 deleted, 28 edited
AUTHORS
Diff of r992 against r951.
- "The main developers of release series 1.x are" (r992) reads "The authors of the 1.x series are" (r951); the developer list that follows (Balazs Dezso <deba@inf.elte.hu>, ..., Akos Ladanyi <ladanyi@tmit.bme.hu>) is the same in both revisions.
- "For more complete list of contributors, please visit the history of the LEMON source code repository: http://lemon.cs.elte.hu/hg/lemon" (r992) reads "For more details on the actual contribution, please visit the history of the main LEMON source repository: http://lemon.cs.elte.hu/hg/lemon" (r951).
- "Moreover, this version is heavily based on version 0.x of LEMON. Here is the list of people who contributed to those versions." (r992) reads "Moreover, this version is heavily based on the 0.x series of LEMON. Here is the list of people who contributed to those versions." (r951); the contributor list starting with Mihaly Barasz <klao@cs.elte.hu> is unchanged.
CMakeLists.txt
Diff of r1000 against r998.
- Version detection: r1000 runs three `hg log -r. --template` commands ({latesttag}, {latesttagdistance}, {node|short}) into HG_REVISION_TAG, HG_REVISION_DIST and HG_REVISION_ID; an empty tag yields "hg-tip", a "null" tag becomes "trunk", a leading "r" is stripped from the tag, and LEMON_VERSION is set to the tag itself when the distance is 0 and to "<tag>+<distance>-<id>" otherwise. r998 instead runs `${PYTHON_EXECUTABLE} ./scripts/chg-len.py` (HG_REVISION_PATH) and `hg id -i` (HG_REVISION) and sets LEMON_VERSION to HG_REVISION, or to "<path>.<revision>" when the path script produced output.
- Threading: r1000 includes FindThreads and introduces the LEMON_THREADING cache variable (Pthread, Win32 or None, auto-detected via CMAKE_USE_PTHREADS_INIT / CMAKE_USE_WIN32_THREADS_INIT), setting LEMON_USE_PTHREAD or LEMON_USE_WIN32_THREADS accordingly; r998 includes FindPythonInterp at this point instead.
- Subdirectories: r1000 adds ADD_SUBDIRECTORY(contrib) before the demo and tools subdirectories when LEMON is the top-level project; r998 does not.
- Packaging: r1000 configures cmake/version.cmake.in into the build tree and defines a custom `dist` target that creates the release archives via `hg archive`, tar and zip (full, -nodoc and -doc variants, depending on the html target), followed by a "# CPACK config (Basically for NSIS)" comment; r998 has none of this. The CPack IF(${CMAKE_SOURCE_DIR} STREQUAL ${PROJECT_SOURCE_DIR}) block itself is present in both revisions.
INSTALL
Diff of r992 against r824.
- r992 documents the CMake-based build, r824 the autotools-based build.
- Introduction: r992 states that the file describes building and installing LEMON from source on Linux (the process on Windows being similar) and notes that installing is not necessary, since LEMON can be integrated into one's own code directly (see https://lemon.cs.elte.hu/trac/lemon/wiki/HowToCompile). r824 assumes an extracted release tarball, points to http://lemon.cs.elte.hu/ for the latest version, and explains that LEMON ships both an "autotool" and a "cmake" build environment, the file covering only the former (recommended on Linux, Mac OSX, other unices and Cygwin), with CMake instructions on the web page.
- Build steps: r992 lists `cd lemon-x.y.z`, `mkdir build; cd build`, `cmake ..`, `make` (compiles the non-template part of LEMON into libemon.a plus the programs in the tools and demo subdirectories), optional `make check`, optional `make html` (requires Python, Doxygen, Graphviz, Ghostscript, LaTeX; the release tarballs already include the documentation), and `make install` (default prefix /usr/local, changeable with -DCMAKE_INSTALL_PREFIX=DIRECTORY, e.g. `cmake -DCMAKE_INSTALL_PREFIX=/home/username/lemon`). r824 lists `cd lemon-x.y.z`, `./configure`, `make` (builds libemon.a and, by default, the tools), `make check` (optional but recommended), `make install` (default prefix /usr/local, changeable with --prefix=DIRECTORY, e.g. `./configure --prefix=/home/username/lemon`), and `make install-html` (the documentation is included in the tarball; regenerate it with `make html`, which needs Doxygen, Graphviz, Ghostscript, Latex).
- Configure options and variables: r992 documents the most useful CMake options: -DCMAKE_INSTALL_PREFIX=PREFIX, -DCMAKE_BUILD_TYPE=[Release|Debug|Maintainer|RelWithDebInfo|MinSizeRel] (Release: strong optimization, -O3 with gcc, the default and recommended for the final compilation; Debug: -O0 -ggdb, recommended during development; Maintainer: Debug plus warnings turned into errors with -Werror and automatic compilation and execution of the tests; RelWithDebInfo: optimized build with debug info; MinSizeRel: size-optimized, -Os), -DTEST_WITH_VALGRIND=YES (run the tests under valgrind to catch indexing problems and memory leaks), -DCMAKE_CXX_COMPILER=path-to-compiler, -DBUILD_SHARED_LIBS=TRUE (build a shared instead of a static library; think twice), and -DGLPK_ROOT_DIR / -DCOIN_ROOT_DIR / -DCPLEX_ROOT_DIR for the install prefixes of the optional third-party libraries. r824 documents the corresponding `./configure` usage: the CXX and CXXFLAGS variables, --prefix=PREFIX, --enable-tools / --disable-tools, and the --with-glpk, --with-cplex, --with-soplex and --with-coin option families, each with an optional =PREFIX argument, -includedir=DIR and -libdir=DIR variants for split installations, and a --without-<solver> form to disable the solver.
- Makefile variables: r992 documents `make VERBOSE=1` to show the full compiler and linker commands. r824 explains that GNU-standard variables such as CXX and CXXFLAGS can be set on the `make` command line and documents the LEMON-specific WARNINGCXXFLAGS variable (e.g. `make WARNINGCXXFLAGS=` to drop all warning flags, or `make CXXFLAGS="-g -O2 -Wno-old-style-cast"` to turn off a single default warning flag, since CXXFLAGS is passed after WARNINGCXXFLAGS).
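Both revisions of INSTALL end with the library and headers installed under the chosen prefix. As a quick smoke test of such an installation, a minimal program along the following lines can be compiled against the installed headers and libemon; the file name and the compiler invocation in the comment are illustrative only and assume the default /usr/local prefix.

    // lemon_hello.cc -- hypothetical example file, not part of this changeset.
    // Build roughly as:  g++ -o lemon_hello lemon_hello.cc -lemon
    #include <iostream>
    #include <lemon/core.h>
    #include <lemon/list_graph.h>

    int main() {
      lemon::ListDigraph g;                       // a mutable directed graph
      lemon::ListDigraph::Node u = g.addNode();
      lemon::ListDigraph::Node v = g.addNode();
      g.addArc(u, v);                             // a single arc u -> v
      std::cout << "nodes: " << lemon::countNodes(g)
                << ", arcs: " << lemon::countArcs(g) << std::endl;
      return 0;
    }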
LICENSE
Diff of r992 against r879.
- The copyright year range differs: "Copyright (C) 2003-2012 Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport (Egervary Combinatorial Optimization Research Group, EGRES)" in r992 reads "Copyright (C) 2003-2010 ..." in r879.
cmake/version.cmake.in
Diff of r983 against r678.
- The substituted variable changes: SET(LEMON_VERSION "@LEMON_VERSION@" CACHE STRING "LEMON version string.") in r983 reads SET(LEMON_VERSION "@PACKAGE_VERSION@" CACHE STRING "LEMON version string.") in r678.
doc/CMakeLists.txt
Diff of r983 against r964.
- r983 contains a "Copy doc from source (if exists)" block: if ${CMAKE_CURRENT_SOURCE_DIR}/html exists and ${CMAKE_CURRENT_BINARY_DIR}/html/index.html does not, it prints "Copy doc from source tree" and copies the html directory into the build tree with `cmake -E copy_directory`. r964 has no such block. The subsequent IF(DOXYGEN_EXECUTABLE AND PYTHONINTERP_FOUND AND GHOSTSCRIPT_EXECUTABLE) section is unchanged.
doc/Doxyfile.in
Diff of r966 against r964.
- r966 lists "@abs_top_srcdir@/contrib" among the Doxygen INPUT directories, between "@abs_top_srcdir@/demo" and "@abs_top_srcdir@/tools"; r964 does not.
doc/coding_style.dox
Diff of r919 against r440.
- In the "Private member variables" subsection, r919 ends the sentence "Private member variables should start with underscore." with a full stop and uses the example identifier _start_with_underscore in the code block; r440 has no full stop and uses _start_with_underscores.
doc/dirs.dox
Diff of r925 against r440.
- r925 documents the contrib directory ("\dir contrib", "Directory for user contributed source codes."): users can place their own C++ code there to have it compiled into an executable along with LEMON, which is described as probably the easiest way of compiling short to medium codes, since it requires neither a system-wide LEMON installation nor extra compiler paths; it also points to contrib/CMakeLists.txt for instructions on adding files to the build. r440 has no such block.
doc/groups.dox
Diff of r919 against r879.
- Minimum cost flow group: "In general, \ref NetworkSimplex and \ref CostScaling are the most efficient implementations, but the other two algorithms could be faster in special cases." (r919) reads "In general NetworkSimplex is the most efficient implementation, but in special cases other algorithms could be faster." (r879). Both keep the note that CapacityScaling is usually the fastest algorithm when the total supply and/or capacities are rather small (without effective scaling); only r919 puts a \ref in front of CapacityScaling.
- Minimum mean cycle group: "the \ref HowardMmc "Howard" algorithm turned out to be by far the most efficient one" (r919) reads "proved to be by far the most efficient one" (r879); the remark about its exponential worst-case bound is unchanged.
- The planar group is titled "Planar Embedding and Drawing" in r919 and "Planarity Embedding and Drawing" in r879.
- The approximation group is named approx_algs in r919 and approx in r879; r919 additionally lists a "Maximum Clique Problem" item referring to \ref GrossoLocatelliPullanMc, "An efficient heuristic algorithm of Grosso, Locatelli, and Pullan".
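As an illustration of the algorithm-selection advice in the minimum cost flow group, the hedged sketch below runs both NetworkSimplex and CostScaling on the same tiny instance and checks that they report the same optimal cost; the graph, maps and numeric values are invented for the example.

    #include <cassert>
    #include <lemon/list_graph.h>
    #include <lemon/network_simplex.h>
    #include <lemon/cost_scaling.h>

    int main() {
      using namespace lemon;
      ListDigraph g;                                   // toy instance, made up
      ListDigraph::Node s = g.addNode(), t = g.addNode();
      ListDigraph::Arc a = g.addArc(s, t);
      ListDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 10; cost[a] = 3;

      NetworkSimplex<ListDigraph> ns(g);               // usually the fastest general choice
      ns.upperMap(cap).costMap(cost).stSupply(s, t, 5);
      ns.run();

      CostScaling<ListDigraph> cs(g);                  // often competitive on large instances
      cs.upperMap(cap).costMap(cost).stSupply(s, t, 5);
      cs.run();

      assert(ns.totalCost() == cs.totalCost());        // both optimal: 5 * 3 = 15
      return 0;
    }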
doc/references.bib
Diff of r904 against r755.
- In the entry ending with "address = {Dublin, Ireland}, year = 1991, month = sep", r904 drops the trailing comma after "month = sep" that r755 still has.
- r904 adds an "%%%%% Other algorithms %%%%%" section with the BibTeX entry grosso08maxclique: Andrea Grosso, Marco Locatelli and Wayne Pullan, "Simple ingredients leading to very efficient heuristics for the maximum clique problem", Journal of Heuristics, volume 14, number 6, pages 587-612, 2008. r755 does not contain this entry.
lemon/CMakeLists.txt
Diff of r981 against r968.
- The CONFIGURE_FILE input templates are named config.h.in and lemon.pc.in in r981 and config.h.cmake and lemon.pc.cmake in r968; the generated outputs (${CMAKE_CURRENT_BINARY_DIR}/config.h and lemon.pc) are the same in both revisions.
lemon/bits/alteration_notifier.h
Diff of r979 against r440.
- r979 includes <lemon/bits/lock.h> next to <lemon/core.h> and adds a lemon::bits::Lock _lock member after the std::list<ObserverBase*> _observers container; r440 has neither.
- In r979, attach() and detach() wrap the observer-list update (inserting/erasing the observer, setting observer._index and observer._notifier) in _lock.lock() / _lock.unlock(); in r440 the same updates run without any locking.
lemon/bits/windows.cc
Diff of r979 against r941.
- r979 implements the WinLock class: the constructor allocates a CRITICAL_SECTION, calls InitializeCriticalSection and stores the pointer in the _repr member; the destructor calls DeleteCriticalSection and deletes it; lock() and unlock() call EnterCriticalSection and LeaveCriticalSection. Every body is guarded by #ifdef WIN32 and is empty otherwise. r941 contains none of these definitions; the closing namespace braces are unchanged.
lemon/bits/windows.h
Diff of r979 against r529.
- r979 declares the WinLock class (constructor, destructor, lock(), unlock(), and a private void *_repr member) after getWinFormattedDate() and getWinRndSeed(); r529 does not.
lemon/capacity_scaling.h
Diff of r985 against r877.
- The class documentation warnings differ: r985 says "Both \c V and \c C must be signed number types", that capacity bounds and supply values must be integer while arc costs can be arbitrary real numbers, and that the algorithm does not support negative costs for arcs having infinite upper bound; r877 says "Both number types must be signed and all input data must be integer" and that negative costs are not supported "for such arcs that have infinite upper bound".
- In the stSupply() documentation, "with a map in which \c k is assigned to \c s, \c -k is assigned to \c t" (r985) reads "with such a map in which ..." (r877).
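For context, the parameter-setting interface referred to in this hunk (upperMap(), costMap(), supplyMap()/stSupply(), run()) is used roughly as sketched below; the instance data is invented for the illustration.

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/capacity_scaling.h>

    int main() {
      using namespace lemon;
      ListDigraph g;                                   // toy instance, made up
      ListDigraph::Node s = g.addNode(), t = g.addNode();
      ListDigraph::Arc a = g.addArc(s, t);
      ListDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 4; cost[a] = 2;

      CapacityScaling<ListDigraph> cas(g);
      cas.upperMap(cap).costMap(cost).stSupply(s, t, 3);   // send 3 units from s to t
      if (cas.run() == CapacityScaling<ListDigraph>::OPTIMAL) {
        std::cout << "total cost: " << cas.totalCost() << std::endl;  // 3 * 2 = 6
      }
      return 0;
    }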
lemon/config.h.in
Diff of r981 against r678.
- r981 is a CMake template: it defines LEMON_VERSION from @PROJECT_VERSION@ and uses #cmakedefine lines for LEMON_HAVE_LONG_LONG, LEMON_HAVE_LP, LEMON_HAVE_MIP, LEMON_HAVE_GLPK, LEMON_HAVE_CPLEX, LEMON_HAVE_CLP, LEMON_HAVE_CBC, LEMON_USE_PTHREAD and LEMON_USE_WIN32_THREADS.
- r678 is an autoheader-style template: commented #undef lines for LEMON_VERSION, LEMON_HAVE_LONG_LONG, LEMON_HAVE_LP, LEMON_HAVE_MIP, LEMON_HAVE_CPLEX, LEMON_HAVE_GLPK, LEMON_HAVE_SOPLEX, LEMON_HAVE_CLP and LEMON_HAVE_CBC. It additionally covers SoPlex, which r981 does not, and has no threading macros.
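Either template is turned into the generated lemon/config.h, so client code can test the resulting macros at compile time. A minimal sketch, assuming the generated header is reachable as <lemon/config.h> in the build or install tree:

    #include <iostream>
    #include <lemon/config.h>   // assumption: the generated config header is on the include path

    int main() {
      std::cout << "LEMON version: " << LEMON_VERSION << std::endl;
    #ifdef LEMON_HAVE_GLPK
      std::cout << "GLPK backend available" << std::endl;   // defined by the build system
    #else
      std::cout << "built without GLPK" << std::endl;
    #endif
      return 0;
    }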
lemon/core.h
Diff of r1000 against r998.
- r1000 adds a free function template undirected(), documented as "Check whether a graph is undirected": it returns true for undirected graph types and false otherwise, dispatching at compile time with enable_if/disable_if on UndirectedTagIndicator<GR> (a plain `bool undirected(const GR& g)` signature is shown for Doxygen). r998 does not have this function. The "Class to copy a digraph" section that follows is unchanged.
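The undirected() helper present on the r1000 side dispatches on the graph type at compile time, so generic code can branch on it; a small hedged sketch of the intended usage (graph types chosen arbitrarily):

    #include <iostream>
    #include <lemon/core.h>
    #include <lemon/list_graph.h>

    int main() {
      lemon::ListDigraph dg;   // a directed graph type
      lemon::ListGraph ug;     // an undirected graph type
      std::cout << std::boolalpha
                << lemon::undirected(dg) << ' '         // false
                << lemon::undirected(ug) << std::endl;  // true
      return 0;
    }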
lemon/cost_scaling.h
Diff of r938 against r931.
- Class documentation: r938 adds "In general, \ref NetworkSimplex and \ref CostScaling are the fastest implementations available in LEMON for this problem." The warnings read "Both \c V and \c C must be signed number types" and "All input data (capacities, supply values, and costs) must be integer", with negative costs unsupported "for arcs having infinite upper bound" (r938), versus "Both number types must be signed and all input data must be integer" and "for such arcs that have infinite upper bound" (r931). For the default Partial Augment-Relabel method, "turned out to be the most efficient and the most robust" (r938) reads "proved to be ..." (r931), and the stSupply() doc again differs in "with a map" versus "with such a map".
- Internal data: r931 keeps a StaticDigraph _sgr together with _arc_vec, _cost_vec, a LargeCostArcMap _cost_map and a LargeCostNodeMap _pi_map as class members, initialized in the constructor and reserved in reset(); r938 has no such members and builds an equivalent static residual graph locally only when needed.
- run(): the default scaling factor is 16 in r938 (documented as "must be at least two" and checked with LEMON_ASSERT(factor >= 2)) and 8 in r931 (documented as "must be larger than one", no assertion). r938 also documents reset() in more detail (it resets the internal data structures as well as the parameters and refers to resetParams() and to the case of a modified underlying digraph), while r931 only warns that the digraph must not be modified after construction.
- The bucket data (_buckets, _bucket_next, _bucket_prev, _rank, _max_rank) are initialized at the end of init() in r938 but at the beginning of start() in r931; the partial-augment path length constant is MAX_PARTIAL_PATH_LENGTH in r938 and MAX_PATH_LENGTH in r931 (both 4).
- Final potential computation: after the scaling phases, r938 scales the potentials down, checks whether they already satisfy the reduced-cost optimality conditions, runs a Bellman-Ford correction on a locally built residual graph (with reduced costs) only if they do not, and finally shifts all potentials so that the GEQ-type optimality conditions hold; r931 always rebuilds the static residual graph with the original costs and recomputes the potentials with BellmanFord via a SetDistMap into _pi_map. The subsequent handling of non-zero lower bounds is also slightly restructured (r938 reads the residual capacity once into a local delta before testing it).
- Heuristics: r938 implements a price (potential) refinement heuristic, priceRefinement(), supported by a DFS-based topologicalSort() that also cancels admissible cycles (MAX_CYCLE_CANCEL = 1) and then processes rank buckets top-down; r931 instead implements an earlyTermination() heuristic that builds the residual graph with costs increased by one and runs at most EARLY_TERM_FACTOR * sqrt(n) weak Bellman-Ford rounds. Accordingly, startAugment() and startPush() call priceRefinement() from the PRICE_REFINEMENT_LIMIT = 2nd epsilon phase on in r938, while r931 calls earlyTermination() once epsilon drops below EARLY_TERM_EPSILON_LIMIT = 1000.
- startAugment(): the global update factor is 1.0 in r938 and 3.0 in r931 (the counters are named global_update_skip / next_global_update_limit versus global_update_freq / next_update_limit); r938 additionally marks the arcs of the current augmenting path (path_arc) so that a cycle detected during the path search can be augmented immediately, restricts the relabel scan to the arcs already examined, and clears the path after augmenting, whereas r931 uses a simpler path search and relabel step. globalUpdate() and the relabel scan in startPush() contain matching small refactorings (e.g. r938 skips saturated arcs before computing reduced costs).
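The run() interface discussed in this hunk takes the internal method and the cost scaling factor (default 16 in r938, 8 in r931). A hedged usage sketch with invented instance data:

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/cost_scaling.h>

    int main() {
      using namespace lemon;
      ListDigraph g;                                   // toy instance, made up
      ListDigraph::Node s = g.addNode(), t = g.addNode();
      ListDigraph::Arc a = g.addArc(s, t);
      ListDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 8; cost[a] = 1;

      typedef CostScaling<ListDigraph> CS;
      CS cs(g);
      cs.upperMap(cap).costMap(cost).stSupply(s, t, 5);
      // Partial Augment-Relabel is the default method; 16 is the scaling factor (alpha).
      if (cs.run(CS::PARTIAL_AUGMENT, 16) == CS::OPTIMAL) {
        std::cout << "total cost: " << cs.totalCost() << std::endl;   // 5 * 1 = 5
      }
      return 0;
    }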
lemon/cycle_canceling.h
Diff of r922 against r877.
- The warnings read "Both \c V and \c C must be signed number types" and "All input data (capacities, supply values, and costs) must be integer", with negative costs unsupported "for arcs having infinite upper bound" (r922), versus "Both number types must be signed and all input data must be integer" and "for such arcs that have infinite upper bound" (r877).
- On the default method: "\ref CANCEL_AND_TIGHTEN "Cancel and Tighten" is used, which is by far the most efficient and the most robust." (r922) versus "... which proved to be the most efficient and the most robust on various test inputs." (r877).
- The stSupply() documentation again differs in "with a map in which" (r922) versus "with such a map in which" (r877).
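Since this hunk concerns the default Cancel-and-Tighten method, here is a short hedged sketch of selecting it explicitly through run(); the instance data is invented.

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/cycle_canceling.h>

    int main() {
      using namespace lemon;
      ListDigraph g;                                   // toy instance, made up
      ListDigraph::Node s = g.addNode(), t = g.addNode();
      ListDigraph::Arc a = g.addArc(s, t);
      ListDigraph::ArcMap<int> cap(g), cost(g);
      cap[a] = 6; cost[a] = 4;

      typedef CycleCanceling<ListDigraph> CC;
      CC cc(g);
      cc.upperMap(cap).costMap(cost).stSupply(s, t, 2);
      if (cc.run(CC::CANCEL_AND_TIGHTEN) == CC::OPTIMAL) {   // method chosen explicitly
        std::cout << "total cost: " << cc.totalCost() << std::endl;   // 2 * 4 = 8
      }
      return 0;
    }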
lemon/euler.h
Diff of r919 against r877.
- The Euler tour iterator for digraphs is placed in the Doxygen group graph_properties in r919 and graph_prop in r877; the description ("This iterator provides an Euler tour (Eulerian circuit) of a \e directed graph (if there exists) and it converts to the \c Arc type of the digraph") is unchanged.
lemon/hao_orlin.h
Diff of r915 against r877.
- Class description: after the sentence on the running time ("... or in \f$O(nm)\f$ for unit capacities"), r915 says "A notable use of this algorithm is testing network reliability."; r877 says "The purpose of such algorithm is e.g. testing network reliability."
- calculateOut() and calculateIn(): r915 adds "It updates the stored cut if (and only if) the newly found one is better."; r877 does not.
- run(): "It chooses source node, then calls \ref init(), \ref calculateOut() and \ref calculateIn()." (r915) versus "It finds nodes \c source and \c target arbitrarily and then calls \ref init(), \ref calculateOut() and \ref calculateIn()." (r877). For run(const Node& s), r915 says the three functions are called with the given source node, while r877 says it uses the given source node, finds a proper target node and then calls them.
- minCutValue(): r915 documents it as returning the value of the best cut found by the previously called run(), calculateOut() or calculateIn(); r877 simply as returning the value of the minimum cut.
- minCutMap(): r915 describes it as giving the best cut found by the previous call, setting \c cutMap to the characteristic vector of that minimum value cut (a non-empty set \f$ X\subsetneq V \f$ of minimum outgoing capacity, \c cutMap being \c true exactly for the nodes of \f$ X \f$); r877 says it sets \c cutMap to the characteristic vector of a minimum value cut.
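The functions named in this hunk (init(), calculateOut(), calculateIn(), run(), minCutValue(), minCutMap()) fit together roughly as in the following hedged sketch; the three-node digraph and its capacities are invented.

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/hao_orlin.h>

    int main() {
      using namespace lemon;
      ListDigraph g;                                   // toy instance, made up
      ListDigraph::Node a = g.addNode(), b = g.addNode(), c = g.addNode();
      ListDigraph::ArcMap<int> cap(g);
      cap[g.addArc(a, b)] = 3;
      cap[g.addArc(b, c)] = 1;
      cap[g.addArc(c, a)] = 2;

      HaoOrlin<ListDigraph, ListDigraph::ArcMap<int> > ho(g, cap);
      ho.run();                              // init() + calculateOut() + calculateIn()
      ListDigraph::NodeMap<bool> cut(g);
      ho.minCutMap(cut);                     // cut[n] is true exactly for the source side X
      std::cout << "min cut value: " << ho.minCutValue() << std::endl;
      return 0;
    }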
lemon/kruskal.h
Diff of r921 against r584.
- In the \file header comment, r584 repeats the description on separate lines ("Kruskal's algorithm to compute a minimum cost spanning tree.") after the \brief line; r921 keeps only the \brief line. The namespace lemon opening that follows is unchanged.
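As a reminder of what this header provides, the kruskal() function computes a minimum cost spanning tree; a hedged sketch with an invented graph and edge costs:

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/kruskal.h>

    int main() {
      using namespace lemon;
      ListGraph g;                                     // toy instance, made up
      ListGraph::Node a = g.addNode(), b = g.addNode(), c = g.addNode();
      ListGraph::EdgeMap<int> cost(g);
      cost[g.addEdge(a, b)] = 1;
      cost[g.addEdge(b, c)] = 2;
      cost[g.addEdge(a, c)] = 5;

      ListGraph::EdgeMap<bool> tree(g);        // output: edges of the spanning tree
      int total = kruskal(g, cost, tree);      // returns the total cost of the tree
      std::cout << "MST cost: " << total << std::endl;   // 1 + 2 = 3
      return 0;
    }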
lemon/lemon.pc.in
Diff of r981 against r658.
- The pkg-config template substitutes CMake variables in r981 (prefix=@CMAKE_INSTALL_PREFIX@, exec_prefix=@CMAKE_INSTALL_PREFIX@/bin, libdir=@CMAKE_INSTALL_PREFIX@/lib, includedir=@CMAKE_INSTALL_PREFIX@/include, Name: @PROJECT_NAME@, Version: @PROJECT_VERSION@) and autoconf variables in r658 (prefix=@prefix@, exec_prefix=@exec_prefix@, libdir=@libdir@, includedir=@includedir@, Name: @PACKAGE_NAME@, Version: @PACKAGE_VERSION@). The Description, Libs (-L${libdir} -lemon plus the GLPK/CPLEX/SOPLEX/CLP/CBC link flags) and Cflags (-I${includedir}) lines are identical.
lemon/network_simplex.h
r984 r889 48 48 /// flow problem. 49 49 /// 50 /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest51 /// i mplementations available in LEMON for this problem.52 /// Furthermore, this class supports both directions of the supply/demand53 /// inequalityconstraints. For more information, see \ref SupplyType.50 /// In general, %NetworkSimplex is the fastest implementation available 51 /// in LEMON for this problem. 52 /// Moreover, it supports both directions of the supply/demand inequality 53 /// constraints. For more information, see \ref SupplyType. 54 54 /// 55 55 /// Most of the parameters of the problem (except for the digraph) … … 64 64 /// algorithm. By default, it is the same as \c V. 65 65 /// 66 /// \warning Both \c V and \c C must be signed number types. 67 /// \warning All input data (capacities, supply values, and costs) must 66 /// \warning Both number types must be signed and all input data must 68 67 /// be integer. 69 68 /// … … 123 122 /// the \ref run() function. 124 123 /// 125 /// \ref NetworkSimplex provides five different implementations for126 /// the pivot strategy that significantly affectsthe running time124 /// \ref NetworkSimplex provides five different pivot rule 125 /// implementations that significantly affect the running time 127 126 /// of the algorithm. 128 /// According to experimental tests conducted on various problem 129 /// instances, \ref BLOCK_SEARCH "Block Search" and 130 /// \ref ALTERING_LIST "Altering Candidate List" rules turned out 131 /// to be the most efficient. 132 /// Since \ref BLOCK_SEARCH "Block Search" is a simpler strategy that 133 /// seemed to be slightly more robust, it is used by default. 134 /// However, another pivot rule can easily be selected using the 135 /// \ref run() function with the proper parameter. 127 /// By default, \ref BLOCK_SEARCH "Block Search" is used, which 128 /// proved to be the most efficient and the most robust on various 129 /// test inputs. 130 /// However, another pivot rule can be selected using the \ref run() 131 /// function with the proper parameter. 136 132 enum PivotRule { 137 133 … … 159 155 /// The \e Altering \e Candidate \e List pivot rule. 160 156 /// It is a modified version of the Candidate List method. 161 /// It keeps only a few of thebest eligible arcs from the former157 /// It keeps only the several best eligible arcs from the former 162 158 /// candidate list and extends this list in every iteration. 
Internal data structures:
- r984: typedef std::vector<signed char> CharVector; (used instead of
  vector<ArcState> and vector<ArcDirection> for efficiency reasons), plus
  enum ArcDirection { DIR_DOWN = -1, DIR_UP = 1 } and the members
  CharVector _pred_dir; CharVector _state;
  r889: typedef std::vector<char> BoolVector; and
  typedef std::vector<signed char> StateVector; with the members
  BoolVector _forward; StateVector _state; r889 additionally keeps the pivot
  iteration temporaries int first, second, right, last; and
  int stem, par_stem, new_stem; as class members.
- In each of the five pivot rule classes, the reference member
  const CharVector &_state; (r984) becomes const StateVector &_state; (r889).
- SortFunc of the Altering Candidate List rule compares
  _map[left] < _map[right] in r984 and _map[left] > _map[right] in r889.

Altering Candidate List rule:
- r984: const double HEAD_LENGTH_FACTOR = 0.01;
  r889: const double HEAD_LENGTH_FACTOR = 0.1;
- In findEnteringArc(), r984 computes the candidate cost into a local Cost c,
  stores it only when negative, and then selects the entering arc with a
  partial sort:
      int new_length = std::min(_head_length + 1, _curr_length);
      std::partial_sort(_candidates.begin(), _candidates.begin() + new_length,
                        _candidates.begin() + _curr_length, _sort_func);
      _in_arc = _candidates[0];
      _candidates[0] = _candidates[new_length - 1];
      _curr_length = new_length - 1;
  r889 writes _cand_cost[e] unconditionally and approximates the partial sort
  with a heap:
      make_heap(_candidates.begin(), _candidates.begin() + _curr_length, _sort_func);
      _in_arc = _candidates[0];
      pop_heap(_candidates.begin(), _candidates.begin() + _curr_length, _sort_func);
      _curr_length = std::min(_head_length, _curr_length - 1);

Constructor:
- r984: NetworkSimplex(const GR& graph, bool arc_mixing = true); the \param text
  says the mixed arc order "leads to similar performance as using the original
  arc order, but it makes the algorithm more robust and in special cases, even
  significantly faster. Therefore, it is enabled by default."
  r889: NetworkSimplex(const GR& graph, bool arc_mixing = false); the \param text
  says it "could lead to better overall performance, but it is usually slower.
  Therefore it is disabled by default."

supplyMap() and stSupply() documentation:
- r984 adds a "\sa supplyType()" reference to supplyMap(), and stSupply() reads
  "with a map in which \c k is assigned to \c s"; r889 has no \sa line and reads
  "with such a map in which \c k is assigned to \c s".

Initialization (reset()):
- r984: _pred_dir.resize(all_node_num);  r889: _forward.resize(all_node_num);
- Mixed arc order in the internal arrays (illustrated by the sketch below):
      r984: const int skip = std::max(_arc_num / _node_num, 3);
            ... if ((i += skip) >= _arc_num) i = ++j;
      r889: int k = std::max(int(std::sqrt(double(_arc_num))), 10);
            ... if ((i += k) >= _arc_num) i = ++j;
- When setting up the artificial root node and arcs, every r984 assignment
  _pred_dir[u] = DIR_UP or _pred_dir[u] = DIR_DOWN corresponds to
  _forward[u] = true or _forward[u] = false in r889.
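To make the mixed arc order more concrete, here is a small sketch that just
reproduces the placement loop of the r984 variant on made-up sizes (arc_num = 12
and node_num = 4 are arbitrary) and prints where each arc would be stored in the
internal arrays.

    #include <algorithm>
    #include <iostream>
    #include <vector>

    // Sketch of the "arc mixing" placement loop (r984 variant with
    // skip = max(m / n, 3)); the sizes below are illustrative only.
    int main() {
      const int arc_num = 12, node_num = 4;
      const int skip = std::max(arc_num / node_num, 3);

      std::vector<int> position(arc_num);   // position[a] = internal slot of arc a
      int i = 0, j = 0;
      for (int a = 0; a < arc_num; ++a) {
        position[a] = i;
        if ((i += skip) >= arc_num) i = ++j; // wrap to the next offset past the end
      }

      for (int a = 0; a < arc_num; ++a)
        std::cout << "arc " << a << " -> slot " << position[a] << '\n';
      return 0;
    }

With these sizes the arcs land in slots 0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11,
i.e. consecutive input arcs are spread out across the internal arrays instead of
being stored back to back.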
findLeavingArc():
- r984 declares int first, second; and Value c, d; locally and computes the
  residual amount on each tree arc as
      d = _flow[e];
      if (_pred_dir[u] == DIR_DOWN) {
        c = _cap[e];
        d = c >= MAX ? INF : c - d;
      }
  r889 uses the class members and the _forward flags instead:
      d = _forward[u] ?
          _flow[e] : (_cap[e] >= MAX ? INF : _cap[e] - _flow[e]);
  (symmetrically on the path from the second node), and the comments change from
  "Search the cycle from the first/second node to the join node" (r984) to
  "Search the cycle along the path from the first/second node to the root" (r889).

Flow update along the cycle:
- r984: _flow[_pred[u]] -= _pred_dir[u] * val;  and
        _flow[_pred[u]] += _pred_dir[u] * val;
  r889: _flow[_pred[u]] += _forward[u] ? -val : val;  and
        _flow[_pred[u]] += _forward[u] ? val : -val;

updateTreeStructure():
- r984 handles the u_in == u_out case separately, keeps the stem-walk variables
  (stem, par_stem, next_stem, last, before, after) local, flips the predecessor
  directions along the stem with _pred_dir[u] = -_pred_dir[p], sets
  _pred_dir[u_in] according to whether u_in is the source of the entering arc,
  and updates _last_succ and _succ_num with local loop variables.
- r889 has no separate u_in == u_out branch, reuses the class members
  (u, w, right, last, stem, par_stem, new_stem), flips the direction flags with
  _forward[u] = !_forward[w], sets _forward[u_in] = (u_in == _source[in_arc]),
  and computes explicit up_limit_in / up_limit_out bounds for the _last_succ
  updates towards the root.

updatePotential():
- r984: Cost sigma = _pi[v_in] - _pi[u_in] - _pred_dir[u_in] * _cost[in_arc];
  r889: Cost sigma = _forward[u_in] ?
          _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] :
          _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]];
  In both revisions, the potentials of the moved subtree are then shifted by
  sigma along the thread list.
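To put the interface changes above in context, here is a minimal usage sketch of
NetworkSimplex. stSupply(), run() and BLOCK_SEARCH appear in the diff above;
upperMap(), costMap(), totalCost() and flow() do not, so treat them as
assumptions taken from the documented LEMON 1.x interface rather than from this
changeset.

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/network_simplex.h>

    using namespace lemon;

    int main() {
      // Tiny made-up instance: send 5 units from s to t at minimum cost.
      ListDigraph g;
      ListDigraph::Node s = g.addNode();
      ListDigraph::Node a = g.addNode();
      ListDigraph::Node t = g.addNode();
      ListDigraph::Arc sa = g.addArc(s, a);
      ListDigraph::Arc at = g.addArc(a, t);
      ListDigraph::Arc st = g.addArc(s, t);

      ListDigraph::ArcMap<int> capacity(g), cost(g);
      capacity[sa] = 4; capacity[at] = 4; capacity[st] = 2;
      cost[sa] = 1;     cost[at] = 1;     cost[st] = 5;

      NetworkSimplex<ListDigraph> ns(g);
      ns.upperMap(capacity).costMap(cost).stSupply(s, t, 5);

      // run() takes the pivot rule as an optional parameter; BLOCK_SEARCH is
      // the default in both revisions shown above.
      NetworkSimplex<ListDigraph>::ProblemType ret =
          ns.run(NetworkSimplex<ListDigraph>::BLOCK_SEARCH);

      if (ret == NetworkSimplex<ListDigraph>::OPTIMAL) {
        std::cout << "total cost: " << ns.totalCost() << '\n';
        std::cout << "flow on s->t: " << ns.flow(st) << '\n';
      } else {
        std::cout << "infeasible or unbounded problem\n";
      }
      return 0;
    }

Because run() returns a ProblemType, the caller can distinguish an optimal
solution from infeasible or unbounded instances before querying flows or costs.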
lemon/path.h
r1000 r998

Documentation-only changes, repeated at each of the path classes
(Path, SimplePath, ListPath, StaticPath):
- r1000 writes "The LEMON path type stores just this list" (and "stores only
  this list"); r998 writes "the lemon path type".
- r1000 consistently writes "the n-th arc" in the descriptions of nth(),
  nthIt() and the arc iterators (including "This function looks for the n-th
  arc in O(n) time." and "The arc iterator pointing to the n-th arc.");
  r998 writes "the nth arc" in these places.
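As a quick illustration of the nth()/nthIt() interface documented above, the
following sketch builds a two-arc path and indexes into it. The tiny graph and
the arc names are made up for illustration.

    #include <iostream>
    #include <lemon/list_graph.h>
    #include <lemon/path.h>

    using namespace lemon;

    int main() {
      ListDigraph g;
      ListDigraph::Node a = g.addNode();
      ListDigraph::Node b = g.addNode();
      ListDigraph::Node c = g.addNode();
      ListDigraph::Arc ab = g.addArc(a, b);
      ListDigraph::Arc bc = g.addArc(b, c);

      Path<ListDigraph> p;
      p.addBack(ab);   // the path type stores just the list of arcs
      p.addBack(bc);

      std::cout << "length: " << p.length() << '\n';            // 2
      std::cout << "nth(1) == bc: " << (p.nth(1) == bc) << '\n';

      // Iterate from the arc returned by nthIt(0) to the end of the path.
      for (Path<ListDigraph>::ArcIt it = p.nthIt(0); it != INVALID; ++it) {
        ListDigraph::Arc e = it;   // ArcIt converts to the arc type
        std::cout << g.id(g.source(e)) << " -> " << g.id(g.target(e)) << '\n';
      }
      return 0;
    }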
test/CMakeLists.txt
r1000 r998

- The test list in r1000 additionally contains max_cardinality_search_test,
  max_clique_test and nagamochi_ibaraki_test; these entries are not present in
  r998. The surrounding entries (maps_test, matching_test,
  min_cost_arborescence_test, min_cost_flow_test, min_mean_cycle_test,
  path_test, planarity_test) are unchanged.
test/graph_copy_test.cc
r894 r893

- r894 includes <lemon/static_graph.h> in addition to smart_graph.h and
  list_graph.h; r893 does not.
- r894 turns digraph_copy_test() and graph_copy_test() into templates over the
  target graph type GR, using GR, GR::template NodeMap<int>,
  GR::template ArcMap<int>, GR::template EdgeMap<int>, GR::Node, GR::Arc,
  GR::Edge and the corresponding reference / cross-reference maps
  (e.g. SmartDigraph::NodeMap<typename GR::Node> and
  GR::template NodeMap<SmartDigraph::Node>) as the copy target.
  r893 hard-codes ListDigraph / ListGraph and their maps instead.
- The verification loops change accordingly: typename GR::NodeIt / ArcIt /
  EdgeIt in r894 versus ListDigraph:: and ListGraph:: iterators in r893.
- main():
      r894: digraph_copy_test<SmartDigraph>();
            digraph_copy_test<ListDigraph>();
            digraph_copy_test<StaticDigraph>();
            graph_copy_test<SmartGraph>();
            graph_copy_test<ListGraph>();
      r893: digraph_copy_test();
            graph_copy_test();
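The test above exercises digraphCopy() together with reference and
cross-reference maps. The sketch below shows the typical pattern on a made-up
two-node digraph; it mirrors the calls visible in the test (nodeMap(),
nodeRef(), nodeCrossRef(), run()) but is not the test code itself.

    #include <iostream>
    #include <lemon/core.h>         // digraphCopy()
    #include <lemon/list_graph.h>
    #include <lemon/smart_graph.h>

    using namespace lemon;

    int main() {
      // Source digraph with a node map to be copied along with the structure.
      SmartDigraph from;
      SmartDigraph::NodeMap<int> fnm(from);
      SmartDigraph::Node u = from.addNode(); fnm[u] = 1;
      SmartDigraph::Node v = from.addNode(); fnm[v] = 2;
      from.addArc(u, v);

      // Target digraph and the copied node map.
      ListDigraph to;
      ListDigraph::NodeMap<int> tnm(to);

      SmartDigraph::NodeMap<ListDigraph::Node> nr(from);   // original -> copy
      ListDigraph::NodeMap<SmartDigraph::Node> ncr(to);    // copy -> original

      digraphCopy(from, to)
        .nodeMap(fnm, tnm)     // copy the node map values
        .nodeRef(nr)           // record the image of each original node
        .nodeCrossRef(ncr)     // and the preimage of each copied node
        .run();

      for (ListDigraph::NodeIt n(to); n != INVALID; ++n) {
        std::cout << "copied node holds " << tnm[n]
                  << ", maps back to original id " << from.id(ncr[n]) << '\n';
      }
      return 0;
    }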