Index: AUTHORS
===================================================================
--- AUTHORS	(revision 951)
+++ AUTHORS	(revision 992)
@@ -1,3 +1,3 @@
-The authors of the 1.x series are
+The main developers of release series 1.x are
 
  * Balazs Dezso <deba@inf.elte.hu>
@@ -6,9 +6,9 @@
  * Akos Ladanyi <ladanyi@tmit.bme.hu>
 
-For more details on the actual contribution, please visit the history
-of the main LEMON source repository: http://lemon.cs.elte.hu/hg/lemon
+For more complete list of contributors, please visit the history of
+the LEMON source code repository: http://lemon.cs.elte.hu/hg/lemon
 
-Moreover, this version is heavily based on the 0.x series of
-LEMON. Here is the list of people who contributed to those versions.
+Moreover, this version is heavily based on version 0.x of LEMON. Here
+is the list of people who contributed to those versions.
 
  * Mihaly Barasz <klao@cs.elte.hu>
Index: CMakeLists.txt
===================================================================
--- CMakeLists.txt	(revision 998)
+++ CMakeLists.txt	(revision 1000)
@@ -13,27 +13,45 @@
 ELSE()
   EXECUTE_PROCESS(
-    COMMAND ${PYTHON_EXECUTABLE} ./scripts/chg-len.py
+    COMMAND
+    hg log -r. --template "{latesttag}"
     WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-    OUTPUT_VARIABLE HG_REVISION_PATH
+    OUTPUT_VARIABLE HG_REVISION_TAG
     ERROR_QUIET
     OUTPUT_STRIP_TRAILING_WHITESPACE
   )
   EXECUTE_PROCESS(
-    COMMAND hg id -i
+    COMMAND
+    hg log -r. --template "{latesttagdistance}"
     WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-    OUTPUT_VARIABLE HG_REVISION
+    OUTPUT_VARIABLE HG_REVISION_DIST
     ERROR_QUIET
     OUTPUT_STRIP_TRAILING_WHITESPACE
   )
-  IF(HG_REVISION STREQUAL "")
+  EXECUTE_PROCESS(
+    COMMAND
+    hg log -r. --template "{node|short}"
+    WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+    OUTPUT_VARIABLE HG_REVISION_ID
+    ERROR_QUIET
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+  )
+
+  IF(HG_REVISION_TAG STREQUAL "")
     SET(HG_REVISION_ID "hg-tip")
   ELSE()
-    IF(HG_REVISION_PATH STREQUAL "")
-      SET(HG_REVISION_ID ${HG_REVISION})
+    IF(HG_REVISION_TAG STREQUAL "null")
+      SET(HG_REVISION_TAG "trunk")
+    ELSEIF(HG_REVISION_TAG MATCHES "^r")
+      STRING(SUBSTRING ${HG_REVISION_TAG} 1 -1 HG_REVISION_TAG)
+    ENDIF()
+    IF(HG_REVISION_DIST STREQUAL "0")
+      SET(HG_REVISION ${HG_REVISION_TAG})
     ELSE()
-      SET(HG_REVISION_ID ${HG_REVISION_PATH}.${HG_REVISION})
+      SET(HG_REVISION
+	"${HG_REVISION_TAG}+${HG_REVISION_DIST}-${HG_REVISION_ID}")
     ENDIF()
   ENDIF()
-  SET(LEMON_VERSION ${HG_REVISION_ID} CACHE STRING "LEMON version string.")
+
+  SET(LEMON_VERSION ${HG_REVISION} CACHE STRING "LEMON version string.")
 ENDIF()
 
@@ -115,5 +133,25 @@
 SET(LEMON_HAVE_LONG_LONG ${HAVE_LONG_LONG})
 
-INCLUDE(FindPythonInterp)
+INCLUDE(FindThreads)
+
+IF(NOT LEMON_THREADING)
+  IF(CMAKE_USE_PTHREADS_INIT)
+    SET(LEMON_THREADING "Pthread")
+  ELSEIF(CMAKE_USE_WIN32_THREADS_INIT)
+    SET(LEMON_THREADING "Win32")
+  ELSE()
+    SET(LEMON_THREADING "None")
+  ENDIF()
+ENDIF()
+
+SET( LEMON_THREADING "${LEMON_THREADING}" CACHE STRING
+  "Choose the threading library, options are: Pthread Win32 None."
+  FORCE )
+
+IF(LEMON_THREADING STREQUAL "Pthread")
+  SET(LEMON_USE_PTHREAD TRUE)
+ELSEIF(LEMON_THREADING STREQUAL "Win32")
+  SET(LEMON_USE_WIN32_THREADS TRUE)
+ENDIF()
 
 ENABLE_TESTING()
@@ -127,4 +165,5 @@
 ADD_SUBDIRECTORY(lemon)
 IF(${CMAKE_SOURCE_DIR} STREQUAL ${PROJECT_SOURCE_DIR})
+  ADD_SUBDIRECTORY(contrib)
   ADD_SUBDIRECTORY(demo)
   ADD_SUBDIRECTORY(tools)
@@ -150,4 +189,31 @@
 ENDIF()
 
+CONFIGURE_FILE(
+  ${PROJECT_SOURCE_DIR}/cmake/version.cmake.in
+  ${PROJECT_BINARY_DIR}/cmake/version.cmake
+  @ONLY
+)
+
+SET(ARCHIVE_BASE_NAME ${CMAKE_PROJECT_NAME})
+STRING(TOLOWER ${ARCHIVE_BASE_NAME} ARCHIVE_BASE_NAME)
+SET(ARCHIVE_NAME ${ARCHIVE_BASE_NAME}-${PROJECT_VERSION})
+ADD_CUSTOM_TARGET(dist
+  COMMAND cmake -E remove_directory ${ARCHIVE_NAME}
+  COMMAND hg archive ${ARCHIVE_NAME}
+  COMMAND cmake -E copy cmake/version.cmake ${ARCHIVE_NAME}/cmake/version.cmake
+  COMMAND tar -czf ${ARCHIVE_BASE_NAME}-nodoc-${PROJECT_VERSION}.tar.gz ${ARCHIVE_NAME}
+  COMMAND zip -r ${ARCHIVE_BASE_NAME}-nodoc-${PROJECT_VERSION}.zip ${ARCHIVE_NAME}
+  COMMAND cmake -E copy_directory doc/html ${ARCHIVE_NAME}/doc/html
+  COMMAND tar -czf ${ARCHIVE_NAME}.tar.gz ${ARCHIVE_NAME}
+  COMMAND zip -r ${ARCHIVE_NAME}.zip ${ARCHIVE_NAME}
+  COMMAND cmake -E copy_directory doc/html ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  COMMAND tar -czf ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}.tar.gz ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  COMMAND zip -r ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}.zip ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  COMMAND cmake -E remove_directory ${ARCHIVE_NAME}
+  COMMAND cmake -E remove_directory ${ARCHIVE_BASE_NAME}-doc-${PROJECT_VERSION}
+  DEPENDS html
+  WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
+
+# CPACK config (Basically for NSIS)
 IF(${CMAKE_SOURCE_DIR} STREQUAL ${PROJECT_SOURCE_DIR})
   SET(CPACK_PACKAGE_NAME ${PROJECT_NAME})
Index: INSTALL
===================================================================
--- INSTALL	(revision 824)
+++ INSTALL	(revision 992)
@@ -2,196 +2,120 @@
 =========================
 
-Since you are reading this I assume you already obtained one of the release
-tarballs and successfully extracted it. The latest version of LEMON is
-available at our web page (http://lemon.cs.elte.hu/).
+This file contains instructions for building and installing LEMON from
+source on Linux. The process on Windows is similar.
 
-LEMON provides two different build environments, one is based on "autotool",
-while the other is based on "cmake". This file contains instructions only for
-the former one, which is the recommended build environment on Linux, Mac OSX
-and other unices or if you use Cygwin on Windows. For cmake installation
-instructions visit http://lemon.cs.elte.hu.
+Note that it is not necessary to install LEMON in order to use
+it. Instead, you can easily integrate it with your own code
+directly. For instructions, see
+https://lemon.cs.elte.hu/trac/lemon/wiki/HowToCompile
+
 
 In order to install LEMON from the extracted source tarball you have to
 issue the following commands:
 
-   1. `cd lemon-x.y.z'
+   1. Step into the root of the source directory.
 
-      This command changes to the directory which was created when you
-      extracted the sources. The x.y.z part is a version number.
+      $ cd lemon-x.y.z
 
-   2. `./configure'
+   2. Create a build subdirectory and step into it.
 
-      This command runs the configure shell script, which does some checks and
-      creates the makefiles.
+      $ mkdir build
+      $ cd build
 
-   3. `make'
+   3. Perform system checks and create the makefiles.
 
-      This command compiles the non-template part of LEMON into libemon.a
-      file. It also compiles the programs in the tools subdirectory by
-      default.
+      $ cmake ..
 
-   4. `make check'
+   4. Build LEMON.
 
-      This step is optional, but recommended. It runs the test programs that
-      we developed for LEMON to check whether the library works properly on
-      your platform.
+      $ make 
 
-   5. `make install'
+      This command compiles the non-template part of LEMON into
+      libemon.a file. It also compiles the programs in the 'tools' and
+      'demo' subdirectories.
+
+   5. [Optional] Compile and run the self-tests.
+
+      $ make check
+
+   6. [Optional] Generate the user documentation.
+
+      $ make html
+
+      The release tarballs already include the documentation.
+
+      Note that for this step you need to have the following tools
+      installed: Python, Doxygen, Graphviz, Ghostscript, LaTeX.
+
+   7. [Optional] Install LEMON
+
+      $ make install
 
       This command installs LEMON under /usr/local (you will need root
-      privileges to be able to do that). If you want to install it to some
-      other location, then pass the --prefix=DIRECTORY flag to configure in
-      step 2. For example: `./configure --prefix=/home/username/lemon'.
-
-   6. `make install-html'
-
-      This command installs the documentation under share/doc/lemon/docs. The
-      generated documentation is included in the tarball. If you want to
-      generate it yourself, then run `make html'. Note that for this you need
-      to have the following programs installed: Doxygen, Graphviz, Ghostscript,
-      Latex.
-
+      privileges to be able to do that). If you want to install it to
+      some other location, then pass the
+      -DCMAKE_INSTALL_PREFIX=DIRECTORY flag to cmake in Step 3.
+      For example:
+      
+      $ cmake -DCMAKE_INSTALL_PREFIX=/home/username/lemon ..
 
 Configure Options and Variables
 ===============================
 
-In step 2 you can customize the actions of configure by setting variables
-and passing options to it. This can be done like this:
-`./configure [OPTION]... [VARIABLE=VALUE]...'
+In Step 3, you can customize the build process by passing options to CMake.
 
-Below you will find some useful variables and options (see `./configure --help'
-for more):
+$ cmake [OPTIONS] ..
 
-CXX='comp'
+You find a list of the most useful options below.
 
-  Change the C++ compiler to 'comp'.
-
-CXXFLAGS='flags'
-
-  Pass the 'flags' to the compiler. For example CXXFLAGS='-O3 -march=pentium-m'
-  turns on generation of aggressively optimized Pentium-M specific code.
-
---prefix=PREFIX
+-DCMAKE_INSTALL_PREFIX=PREFIX
 
   Set the installation prefix to PREFIX. By default it is /usr/local.
 
---enable-tools
+-DCMAKE_BUILD_TYPE=[Release|Debug|Maintainer|...]
 
-   Build the programs in the tools subdirectory (default).
+  This sets the compiler options. The choices are the following
 
---disable-tools
+  'Release': A strong optimization is turned on (-O3 with gcc). This
+    is the default setting and we strongly recommend using this for
+    the final compilation.
 
-   Do not build the programs in the tools subdirectory.
+  'Debug': Optimization is turned off and debug info is added (-O0
+    -ggdb with gcc). It is recommended during development.
 
---with-glpk[=PREFIX]
+  'Maintainer': The same as 'Debug' but the compiler warnings are
+    converted to errors (-Werror with gcc). In addition, 'make' will
+    also automatically compile and execute the test codes. It is the
+    best way of ensuring that LEMON codebase is clean and safe.
 
-   Enable GLPK support (default). You should specify the prefix too if
-   you installed GLPK to some non-standard location (e.g. your home
-   directory). If it is not found, GLPK support will be disabled.
+  'RelWithDebInfo': Optimized build with debug info.
 
---with-glpk-includedir=DIR
+  'MinSizeRel': Size optimized build (-Os with gcc)
 
-   The directory where the GLPK header files are located. This is only
-   useful when the GLPK headers and libraries are not under the same
-   prefix (which is unlikely).
+-DTEST_WITH_VALGRIND=YES
 
---with-glpk-libdir=DIR
+  Using this, the test codes will be executed using valgrind. It is a
+  very effective way of identifying indexing problems and memory leaks.
 
-   The directory where the GLPK libraries are located. This is only
-   useful when the GLPK headers and libraries are not under the same
-   prefix (which is unlikely).
+-DCMAKE_CXX_COMPILER=path-to-compiler
 
---without-glpk
+  Change the compiler to be used.
 
-   Disable GLPK support.
+-DBUILD_SHARED_LIBS=TRUE
 
---with-cplex[=PREFIX]
+  Build shared library instead of static one. Think twice if you
+  really want to use this option.
 
-   Enable CPLEX support (default). You should specify the prefix too
-   if you installed CPLEX to some non-standard location
-   (e.g. /opt/ilog/cplex75). If it is not found, CPLEX support will be
-   disabled.
+-DGLPK_ROOT_DIR=DIRECTORY
+-DCOIN_ROOT_DIR=DIRECTORY
+-DCPLEX_ROOT_DIR=DIRECTORY
 
---with-cplex-includedir=DIR
-
-   The directory where the CPLEX header files are located. This is
-   only useful when the CPLEX headers and libraries are not under the
-   same prefix (e.g.  /usr/local/cplex/cplex75/include).
-
---with-cplex-libdir=DIR
-
-   The directory where the CPLEX libraries are located. This is only
-   useful when the CPLEX headers and libraries are not under the same
-   prefix (e.g.
-   /usr/local/cplex/cplex75/lib/i86_linux2_glibc2.2_gcc3.0/static_pic_mt).
-
---without-cplex
-
-   Disable CPLEX support.
-
---with-soplex[=PREFIX]
-
-   Enable SoPlex support (default). You should specify the prefix too if
-   you installed SoPlex to some non-standard location (e.g. your home
-   directory). If it is not found, SoPlex support will be disabled.
-
---with-soplex-includedir=DIR
-
-   The directory where the SoPlex header files are located. This is only
-   useful when the SoPlex headers and libraries are not under the same
-   prefix (which is unlikely).
-
---with-soplex-libdir=DIR
-
-   The directory where the SoPlex libraries are located. This is only
-   useful when the SoPlex headers and libraries are not under the same
-   prefix (which is unlikely).
-
---without-soplex
-
-   Disable SoPlex support.
-
---with-coin[=PREFIX]
-
-   Enable support for COIN-OR solvers (CLP and CBC). You should
-   specify the prefix too. (by default, COIN-OR tools install
-   themselves to the source code directory). This command enables the
-   solvers that are actually found.
-
---with-coin-includedir=DIR
-
-   The directory where the COIN-OR header files are located. This is
-   only useful when the COIN-OR headers and libraries are not under
-   the same prefix (which is unlikely).
-
---with-coin-libdir=DIR
-
-   The directory where the COIN-OR libraries are located. This is only
-   useful when the COIN-OR headers and libraries are not under the
-   same prefix (which is unlikely).
-
---without-coin
-
-   Disable COIN-OR support.
-
+  Install root directory prefixes of optional third party libraries.
 
 Makefile Variables
 ==================
 
-Some Makefile variables are reserved by the GNU Coding Standards for
-the use of the "user" - the person building the package. For instance,
-CXX and CXXFLAGS are such variables, and have the same meaning as
-explained in the previous section. These variables can be set on the
-command line when invoking `make' like this:
-`make [VARIABLE=VALUE]...'
+make VERBOSE=1
 
-WARNINGCXXFLAGS is a non-standard Makefile variable introduced by us
-to hold several compiler flags related to warnings. Its default value
-can be overridden when invoking `make'. For example to disable all
-warning flags use `make WARNINGCXXFLAGS='.
-
-In order to turn off a single flag from the default set of warning
-flags, you can use the CXXFLAGS variable, since this is passed after
-WARNINGCXXFLAGS. For example to turn off `-Wold-style-cast' (which is
-used by default when g++ is detected) you can use
-`make CXXFLAGS="-g -O2 -Wno-old-style-cast"'.
+   This results in a more verbose output by showing the full
+   compiler and linker commands.
Index: LICENSE
===================================================================
--- LICENSE	(revision 879)
+++ LICENSE	(revision 992)
@@ -2,5 +2,5 @@
 copyright/license.
 
-Copyright (C) 2003-2010 Egervary Jeno Kombinatorikus Optimalizalasi
+Copyright (C) 2003-2012 Egervary Jeno Kombinatorikus Optimalizalasi
 Kutatocsoport (Egervary Combinatorial Optimization Research Group,
 EGRES).
Index: Makefile.am
===================================================================
--- Makefile.am	(revision 793)
+++ 	(revision )
@@ -1,80 +1,0 @@
-ACLOCAL_AMFLAGS = -I m4
-
-AM_CXXFLAGS = $(WARNINGCXXFLAGS)
-
-AM_CPPFLAGS = -I$(top_srcdir) -I$(top_builddir)
-LDADD = $(top_builddir)/lemon/libemon.la
-
-EXTRA_DIST = \
-	AUTHORS \
-	LICENSE \
-	m4/lx_check_cplex.m4 \
-	m4/lx_check_glpk.m4 \
-	m4/lx_check_soplex.m4 \
-	m4/lx_check_coin.m4 \
-	CMakeLists.txt \
-	cmake/FindGhostscript.cmake \
-	cmake/FindCPLEX.cmake \
-	cmake/FindGLPK.cmake \
-	cmake/FindCOIN.cmake \
-	cmake/LEMONConfig.cmake.in \
-	cmake/version.cmake.in \
-	cmake/version.cmake \
-	cmake/nsis/lemon.ico \
-	cmake/nsis/uninstall.ico
-
-pkgconfigdir = $(libdir)/pkgconfig
-lemondir = $(pkgincludedir)
-bitsdir = $(lemondir)/bits
-conceptdir = $(lemondir)/concepts
-pkgconfig_DATA =
-lib_LTLIBRARIES =
-lemon_HEADERS =
-bits_HEADERS =
-concept_HEADERS =
-noinst_HEADERS =
-noinst_PROGRAMS =
-bin_PROGRAMS =
-check_PROGRAMS =
-dist_bin_SCRIPTS =
-TESTS =
-XFAIL_TESTS =
-
-include lemon/Makefile.am
-include test/Makefile.am
-include doc/Makefile.am
-include tools/Makefile.am
-include scripts/Makefile.am
-
-DIST_SUBDIRS = demo
-
-demo:
-	$(MAKE) $(AM_MAKEFLAGS) -C demo
-
-MRPROPERFILES = \
-	aclocal.m4 \
-	config.h.in \
-	config.h.in~ \
-	configure \
-	Makefile.in \
-	build-aux/config.guess \
-	build-aux/config.sub \
-	build-aux/depcomp \
-	build-aux/install-sh \
-	build-aux/ltmain.sh \
-	build-aux/missing \
-	doc/doxygen.log
-
-mrproper:
-	$(MAKE) $(AM_MAKEFLAGS) maintainer-clean
-	-rm -f $(MRPROPERFILES)
-
-dist-bz2: dist
-	zcat $(PACKAGE)-$(VERSION).tar.gz | \
-	bzip2 --best -c > $(PACKAGE)-$(VERSION).tar.bz2
-
-distcheck-bz2: distcheck
-	zcat $(PACKAGE)-$(VERSION).tar.gz | \
-	bzip2 --best -c > $(PACKAGE)-$(VERSION).tar.bz2
-
-.PHONY: demo mrproper dist-bz2 distcheck-bz2
Index: cmake/version.cmake.in
===================================================================
--- cmake/version.cmake.in	(revision 678)
+++ cmake/version.cmake.in	(revision 983)
@@ -1,1 +1,1 @@
-SET(LEMON_VERSION "@PACKAGE_VERSION@" CACHE STRING "LEMON version string.")
+SET(LEMON_VERSION "@LEMON_VERSION@" CACHE STRING "LEMON version string.")
Index: configure.ac
===================================================================
--- configure.ac	(revision 930)
+++ 	(revision )
@@ -1,157 +1,0 @@
-dnl Process this file with autoconf to produce a configure script.
-
-dnl Version information.
-m4_define([lemon_version_number],
-          [m4_normalize(esyscmd([echo ${LEMON_VERSION}]))])
-dnl m4_define([lemon_version_number], [])
-m4_define([lemon_hg_path], [m4_normalize(esyscmd([./scripts/chg-len.py]))])
-m4_define([lemon_hg_revision], [m4_normalize(esyscmd([hg id -i 2> /dev/null]))])
-m4_define([lemon_version], [ifelse(lemon_version_number(),
-                           [],
-                           [ifelse(lemon_hg_revision(),
-                           [],
-                           [hg-tip],
-                           [lemon_hg_path().lemon_hg_revision()])],
-                           [lemon_version_number()])])
-
-AC_PREREQ([2.59])
-AC_INIT([LEMON], [lemon_version()], [lemon-user@lemon.cs.elte.hu], [lemon])
-AC_CONFIG_AUX_DIR([build-aux])
-AC_CONFIG_MACRO_DIR([m4])
-AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects nostdinc])
-AC_CONFIG_SRCDIR([lemon/list_graph.h])
-AC_CONFIG_HEADERS([config.h lemon/config.h])
-
-AC_DEFINE([LEMON_VERSION], [lemon_version()], [The version string])
-
-dnl Do compilation tests using the C++ compiler.
-AC_LANG([C++])
-
-dnl Check the existence of long long type.
-AC_CHECK_TYPE(long long, [long_long_found=yes], [long_long_found=no])
-if test x"$long_long_found" = x"yes"; then
-  AC_DEFINE([LEMON_HAVE_LONG_LONG], [1], [Define to 1 if you have long long.])
-fi
-
-dnl Checks for programs.
-AC_PROG_CXX
-AC_PROG_CXXCPP
-AC_PROG_INSTALL
-AC_DISABLE_SHARED
-AC_PROG_LIBTOOL
-
-AC_CHECK_PROG([doxygen_found],[doxygen],[yes],[no])
-AC_CHECK_PROG([python_found],[python],[yes],[no])
-AC_CHECK_PROG([gs_found],[gs],[yes],[no])
-
-dnl Detect Intel compiler.
-AC_MSG_CHECKING([whether we are using the Intel C++ compiler])
-AC_COMPILE_IFELSE([#ifndef __INTEL_COMPILER
-choke me
-#endif], [ICC=[yes]], [ICC=[no]])
-if test x"$ICC" = x"yes"; then
-  AC_MSG_RESULT([yes])
-else
-  AC_MSG_RESULT([no])
-fi
-
-dnl Set custom compiler flags when using g++.
-if test "$GXX" = yes -a "$ICC" = no; then
-  WARNINGCXXFLAGS="-Wall -W -Wall -W -Wunused -Wformat=2 -Wctor-dtor-privacy -Wnon-virtual-dtor -Wno-char-subscripts -Wwrite-strings -Wno-char-subscripts -Wreturn-type -Wcast-qual -Wcast-align -Wsign-promo -Woverloaded-virtual -ansi -fno-strict-aliasing -Wold-style-cast -Wno-unknown-pragmas"
-fi
-AC_SUBST([WARNINGCXXFLAGS])
-
-dnl Checks for libraries.
-LX_CHECK_GLPK
-LX_CHECK_CPLEX
-LX_CHECK_SOPLEX
-LX_CHECK_COIN
-
-AM_CONDITIONAL([HAVE_LP], [test x"$lx_lp_found" = x"yes"])
-AM_CONDITIONAL([HAVE_MIP], [test x"$lx_mip_found" = x"yes"])
-
-dnl Disable/enable building the binary tools.
-AC_ARG_ENABLE([tools],
-AS_HELP_STRING([--enable-tools], [build additional tools @<:@default@:>@])
-AS_HELP_STRING([--disable-tools], [do not build additional tools]),
-              [], [enable_tools=yes])
-AC_MSG_CHECKING([whether to build the additional tools])
-if test x"$enable_tools" != x"no"; then
-  AC_MSG_RESULT([yes])
-else
-  AC_MSG_RESULT([no])
-fi
-AM_CONDITIONAL([WANT_TOOLS], [test x"$enable_tools" != x"no"])
-
-dnl Support for running test cases using valgrind.
-use_valgrind=no
-AC_ARG_ENABLE([valgrind],
-AS_HELP_STRING([--enable-valgrind], [use valgrind when running tests]),
-              [use_valgrind=yes])
-
-if [[ "$use_valgrind" = "yes" ]]; then
-  AC_CHECK_PROG(HAVE_VALGRIND, valgrind, yes, no)
-
-  if [[ "$HAVE_VALGRIND" = "no" ]]; then
-    AC_MSG_ERROR([Valgrind not found in PATH.])
-  fi
-fi
-AM_CONDITIONAL(USE_VALGRIND, [test "$use_valgrind" = "yes"])
-
-dnl Checks for header files.
-AC_CHECK_HEADERS(limits.h sys/time.h sys/times.h unistd.h)
-
-dnl Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-AC_C_INLINE
-AC_TYPE_SIZE_T
-AC_HEADER_TIME
-AC_STRUCT_TM
-
-dnl Checks for library functions.
-AC_HEADER_STDC
-AC_CHECK_FUNCS(gettimeofday times ctime_r)
-
-dnl Add dependencies on files generated by configure.
-AC_SUBST([CONFIG_STATUS_DEPENDENCIES],
-  ['$(top_srcdir)/doc/Doxyfile.in $(top_srcdir)/doc/mainpage.dox.in $(top_srcdir)/lemon/lemon.pc.in $(top_srcdir)/cmake/version.cmake.in'])
-
-AC_CONFIG_FILES([
-Makefile
-demo/Makefile
-cmake/version.cmake
-doc/Doxyfile
-doc/mainpage.dox
-lemon/lemon.pc
-])
-
-AC_OUTPUT
-
-echo
-echo '****************************** SUMMARY ******************************'
-echo
-echo Package version............... : $PACKAGE-$VERSION
-echo
-echo C++ compiler.................. : $CXX
-echo C++ compiles flags............ : $WARNINGCXXFLAGS $CXXFLAGS
-echo
-echo Compiler supports long long... : $long_long_found
-echo
-echo GLPK support.................. : $lx_glpk_found
-echo CPLEX support................. : $lx_cplex_found
-echo SOPLEX support................ : $lx_soplex_found
-echo CLP support................... : $lx_clp_found
-echo CBC support................... : $lx_cbc_found
-echo
-echo Build additional tools........ : $enable_tools
-echo Use valgrind for tests........ : $use_valgrind
-echo
-echo The packace will be installed in
-echo -n '  '
-echo $prefix.
-echo
-echo '*********************************************************************'
-
-echo
-echo Configure complete, now type \'make\' and then \'make install\'.
-echo
Index: contrib/CMakeLists.txt
===================================================================
--- contrib/CMakeLists.txt	(revision 925)
+++ contrib/CMakeLists.txt	(revision 925)
@@ -0,0 +1,19 @@
+INCLUDE_DIRECTORIES(
+  ${PROJECT_SOURCE_DIR}
+  ${PROJECT_BINARY_DIR}
+)
+
+LINK_DIRECTORIES(
+  ${PROJECT_BINARY_DIR}/lemon
+)
+
+# Uncomment (and adjust) the following two lines. 'myprog' is the name
+# of the final executable ('.exe' will automatically be added to the
+# name on Windows) and 'myprog-main.cc' is the source code it is
+# compiled from. You can add more source files separated by
+# whitespaces. Moreover, you can add multiple similar blocks if you
+# want to build more than one executables.
+
+# ADD_EXECUTABLE(myprog myprog-main.cc)
+# TARGET_LINK_LIBRARIES(myprog lemon)
+
Index: demo/Makefile.am
===================================================================
--- demo/Makefile.am	(revision 564)
+++ 	(revision )
@@ -1,17 +1,0 @@
-AM_CXXFLAGS = $(WARNINGCXXFLAGS)
-
-AM_CPPFLAGS = -I$(top_srcdir) -I$(top_builddir)
-LDADD = $(top_builddir)/lemon/libemon.la
-
-EXTRA_DIST = \
-	CMakeLists.txt \
-	digraph.lgf
-
-noinst_PROGRAMS = \
-	arg_parser_demo \
-	graph_to_eps_demo \
-	lgf_demo
-
-arg_parser_demo_SOURCES = arg_parser_demo.cc
-graph_to_eps_demo_SOURCES = graph_to_eps_demo.cc
-lgf_demo_SOURCES = lgf_demo.cc
Index: doc/CMakeLists.txt
===================================================================
--- doc/CMakeLists.txt	(revision 964)
+++ doc/CMakeLists.txt	(revision 983)
@@ -17,4 +17,13 @@
   @ONLY
 )
+
+# Copy doc from source (if exists)
+IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/html AND 
+    NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/html/index.html)
+  MESSAGE(STATUS "Copy doc from source tree")
+  EXECUTE_PROCESS(
+    COMMAND cmake -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/html ${CMAKE_CURRENT_BINARY_DIR}/html
+    )
+ENDIF()
 
 IF(DOXYGEN_EXECUTABLE AND PYTHONINTERP_FOUND AND GHOSTSCRIPT_EXECUTABLE)
Index: doc/Doxyfile.in
===================================================================
--- doc/Doxyfile.in	(revision 964)
+++ doc/Doxyfile.in	(revision 966)
@@ -96,4 +96,5 @@
                          "@abs_top_srcdir@/lemon/concepts" \
                          "@abs_top_srcdir@/demo" \
+                         "@abs_top_srcdir@/contrib" \
                          "@abs_top_srcdir@/tools" \
                          "@abs_top_srcdir@/test/test_tools.h" \
Index: doc/Makefile.am
===================================================================
--- doc/Makefile.am	(revision 970)
+++ 	(revision )
@@ -1,125 +1,0 @@
-EXTRA_DIST += \
-	doc/Doxyfile.in \
-	doc/DoxygenLayout.xml \
-	doc/coding_style.dox \
-	doc/dirs.dox \
-	doc/groups.dox \
-	doc/lgf.dox \
-	doc/license.dox \
-	doc/mainpage.dox \
-	doc/migration.dox \
-	doc/min_cost_flow.dox \
-	doc/named-param.dox \
-	doc/namespaces.dox \
-	doc/references.bib \
-	doc/template.h \
-	doc/html \
-	doc/CMakeLists.txt
-
-DOC_EPS_IMAGES18 = \
-	grid_graph.eps \
-	nodeshape_0.eps \
-	nodeshape_1.eps \
-	nodeshape_2.eps \
-	nodeshape_3.eps \
-	nodeshape_4.eps
-
-DOC_EPS_IMAGES27 = \
-	bipartite_matching.eps \
-	bipartite_partitions.eps \
-	connected_components.eps \
-	edge_biconnected_components.eps \
-	matching.eps \
-	node_biconnected_components.eps \
-	planar.eps \
-	strongly_connected_components.eps
-
-DOC_EPS_IMAGES = \
-	$(DOC_EPS_IMAGES18) \
-	$(DOC_EPS_IMAGES27)
-
-DOC_PNG_IMAGES = \
-	$(DOC_EPS_IMAGES:%.eps=doc/gen-images/%.png)
-
-EXTRA_DIST += $(DOC_EPS_IMAGES:%=doc/images/%)
-
-doc/html:
-	$(MAKE) $(AM_MAKEFLAGS) html
-
-GS_COMMAND=gs -dNOPAUSE -dBATCH -q -dEPSCrop -dTextAlphaBits=4 -dGraphicsAlphaBits=4
-
-$(DOC_EPS_IMAGES18:%.eps=doc/gen-images/%.png): doc/gen-images/%.png: doc/images/%.eps
-	-mkdir doc/gen-images
-	if test ${gs_found} = yes; then \
-	  $(GS_COMMAND) -sDEVICE=pngalpha -r18 -sOutputFile=$@ $<; \
-	else \
-	  echo; \
-	  echo "Ghostscript not found."; \
-	  echo; \
-	  exit 1; \
-	fi
-
-$(DOC_EPS_IMAGES27:%.eps=doc/gen-images/%.png): doc/gen-images/%.png: doc/images/%.eps
-	-mkdir doc/gen-images
-	if test ${gs_found} = yes; then \
-	  $(GS_COMMAND) -sDEVICE=pngalpha -r27 -sOutputFile=$@ $<; \
-	else \
-	  echo; \
-	  echo "Ghostscript not found."; \
-	  echo; \
-	  exit 1; \
-	fi
-
-references.dox: doc/references.bib
-	if test ${python_found} = yes; then \
-	  cd doc; \
-	  python @abs_top_srcdir@/scripts/bib2dox.py @abs_top_builddir@/$< >$@; \
-	  cd ..; \
-	else \
-	  echo; \
-	  echo "Python not found."; \
-	  echo; \
-	  exit 1; \
-	fi
-
-html-local: $(DOC_PNG_IMAGES) references.dox
-	if test ${doxygen_found} = yes; then \
-	  cd doc; \
-	  doxygen Doxyfile; \
-	  cd ..; \
-	else \
-	  echo; \
-	  echo "Doxygen not found."; \
-	  echo; \
-	  exit 1; \
-	fi
-
-clean-local:
-	-rm -rf doc/html
-	-rm -f doc/doxygen.log
-	-rm -f $(DOC_PNG_IMAGES)
-	-rm -rf doc/gen-images
-
-update-external-tags:
-	wget -O doc/libstdc++.tag.tmp http://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen/libstdc++.tag && \
-	mv doc/libstdc++.tag.tmp doc/libstdc++.tag || \
-	rm doc/libstdc++.tag.tmp
-
-install-html-local: doc/html
-	@$(NORMAL_INSTALL)
-	$(mkinstalldirs) $(DESTDIR)$(htmldir)/html
-	for p in doc/html/*.{html,css,png,map,gif,tag} ; do \
-	  f="`echo $$p | sed -e 's|^.*/||'`"; \
-	  echo " $(INSTALL_DATA) $$p $(DESTDIR)$(htmldir)/html/$$f"; \
-	  $(INSTALL_DATA) $$p $(DESTDIR)$(htmldir)/html/$$f; \
-	done
-
-uninstall-local:
-	@$(NORMAL_UNINSTALL)
-	for p in doc/html/*.{html,css,png,map,gif,tag} ; do \
-	  f="`echo $$p | sed -e 's|^.*/||'`"; \
-	  echo " rm -f $(DESTDIR)$(htmldir)/html/$$f"; \
-	  rm -f $(DESTDIR)$(htmldir)/html/$$f; \
-	done
-
-.PHONY: update-external-tags
Index: doc/coding_style.dox
===================================================================
--- doc/coding_style.dox	(revision 440)
+++ doc/coding_style.dox	(revision 919)
@@ -99,8 +99,8 @@
 \subsection pri-loc-var Private member variables
 
-Private member variables should start with underscore
+Private member variables should start with underscore.
 
 \code
-_start_with_underscores
+_start_with_underscore
 \endcode
 
Index: doc/dirs.dox
===================================================================
--- doc/dirs.dox	(revision 440)
+++ doc/dirs.dox	(revision 925)
@@ -32,4 +32,17 @@
 documentation.
 */
+
+/**
+\dir contrib
+\brief Directory for user contributed source codes.
+
+You can place your own C++ code using LEMON into this directory, which
+will compile to an executable along with LEMON when you build the
+library. This is probably the easiest way of compiling short to medium
+codes, as this requires neither a system-wide LEMON installation nor
+adding several paths to the compiler.
+
+Please have a look at <tt>contrib/CMakeLists.txt</tt> for
+instructions on how to add your own files into the build process.  */
 
 /**
Index: doc/groups.dox
===================================================================
--- doc/groups.dox	(revision 879)
+++ doc/groups.dox	(revision 1003)
@@ -407,8 +407,18 @@
    strongly polynomial \ref klein67primal, \ref goldberg89cyclecanceling.
 
-In general NetworkSimplex is the most efficient implementation,
-but in special cases other algorithms could be faster.
+In general, \ref NetworkSimplex and \ref CostScaling are the most efficient
+implementations.
+\ref NetworkSimplex is usually the fastest on relatively small graphs (up to
+several thousands of nodes) and on dense graphs, while \ref CostScaling is
+typically more efficient on large graphs (e.g. hundreds of thousands of
+nodes or above), especially if they are sparse.
+However, other algorithms could be faster in special cases.
 For example, if the total supply and/or capacities are rather small,
-CapacityScaling is usually the fastest algorithm (without effective scaling).
+\ref CapacityScaling is usually the fastest algorithm (without effective scaling).
+
+These classes are intended to be used with integer-valued input data
+(capacities, supply values, and costs), except for \ref CapacityScaling,
+which is capable of handling real-valued arc costs (other numerical
+data are required to be integer).
 */
 
@@ -449,5 +459,5 @@
 
 This group contains the algorithms for finding minimum mean cycles
-\ref clrs01algorithms, \ref amo93networkflows.
+\ref amo93networkflows, \ref karp78characterization.
 
 The \e minimum \e mean \e cycle \e problem is to find a directed cycle
@@ -465,12 +475,11 @@
 
 LEMON contains three algorithms for solving the minimum mean cycle problem:
-- \ref KarpMmc Karp's original algorithm \ref amo93networkflows,
-  \ref dasdan98minmeancycle.
+- \ref KarpMmc Karp's original algorithm \ref karp78characterization.
 - \ref HartmannOrlinMmc Hartmann-Orlin's algorithm, which is an improved
-  version of Karp's algorithm \ref dasdan98minmeancycle.
+  version of Karp's algorithm \ref hartmann93finding.
 - \ref HowardMmc Howard's policy iteration algorithm
-  \ref dasdan98minmeancycle.
-
-In practice, the \ref HowardMmc "Howard" algorithm proved to be by far the
+  \ref dasdan98minmeancycle, \ref dasdan04experimental.
+
+In practice, the \ref HowardMmc "Howard" algorithm turned out to be by far the
 most efficient one, though the best known theoretical bound on its running
 time is exponential.
@@ -540,5 +549,5 @@
 
 /**
-@defgroup planar Planarity Embedding and Drawing
+@defgroup planar Planar Embedding and Drawing
 @ingroup algs
 \brief Algorithms for planarity checking, embedding and drawing
@@ -552,5 +561,5 @@
 
 /**
-@defgroup approx Approximation Algorithms
+@defgroup approx_algs Approximation Algorithms
 @ingroup algs
 \brief Approximation algorithms.
@@ -558,4 +567,8 @@
 This group contains the approximation and heuristic algorithms
 implemented in LEMON.
+
+<b>Maximum Clique Problem</b>
+  - \ref GrossoLocatelliPullanMc An efficient heuristic algorithm of
+    Grosso, Locatelli, and Pullan.
 */
 
Index: doc/min_cost_flow.dox
===================================================================
--- doc/min_cost_flow.dox	(revision 877)
+++ doc/min_cost_flow.dox	(revision 1002)
@@ -102,5 +102,5 @@
 \f[ lower(uv) \leq f(uv) \leq upper(uv) \quad \forall uv\in A \f]
 
-However if the sum of the supply values is zero, then these two problems
+However, if the sum of the supply values is zero, then these two problems
 are equivalent.
 The \ref min_cost_flow_algs "algorithms" in LEMON support the general
Index: doc/references.bib
===================================================================
--- doc/references.bib	(revision 755)
+++ doc/references.bib	(revision 1002)
@@ -5,6 +5,5 @@
   title =        {{LEMON} -- {L}ibrary for {E}fficient {M}odeling and
                   {O}ptimization in {N}etworks},
-  howpublished = {\url{http://lemon.cs.elte.hu/}},
-  year =         2009
+  howpublished = {\url{http://lemon.cs.elte.hu/}}
 }
 
@@ -212,4 +211,14 @@
   volume =       23,
   pages =        {309-311}
+}
+
+@article{hartmann93finding,
+  author =       {Mark Hartmann and James B. Orlin},
+  title =        {Finding minimum cost to time ratio cycles with small
+                  integral transit times},
+  journal =      {Networks},
+  year =         1993,
+  volume =       23,
+  pages =        {567-574}
 }
 
@@ -226,4 +235,15 @@
 }
 
+@article{dasdan04experimental,
+  author =       {Ali Dasdan},
+  title =        {Experimental analysis of the fastest optimum cycle
+                  ratio and mean algorithms},
+  journal =      {ACM Trans. Des. Autom. Electron. Syst.},
+  year =         2004,
+  volume =       9,
+  issue =        4,
+  pages =        {385-418}
+}
+
 
 %%%%% Minimum cost flow algorithms %%%%%
@@ -298,4 +318,17 @@
   address =      {Dublin, Ireland},
   year =         1991,
-  month =        sep,
-}
+  month =        sep
+}
+
+%%%%% Other algorithms %%%%%
+
+@article{grosso08maxclique,
+  author =       {Andrea Grosso and Marco Locatelli and Wayne Pullan},
+  title =        {Simple ingredients leading to very efficient
+                  heuristics for the maximum clique problem},
+  journal =      {Journal of Heuristics},
+  year =         2008,
+  volume =       14,
+  number =       6,
+  pages =        {587--612}
+}
Index: lemon/CMakeLists.txt
===================================================================
--- lemon/CMakeLists.txt	(revision 968)
+++ lemon/CMakeLists.txt	(revision 981)
@@ -5,10 +5,10 @@
 
 CONFIGURE_FILE(
-  ${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake
+  ${CMAKE_CURRENT_SOURCE_DIR}/config.h.in
   ${CMAKE_CURRENT_BINARY_DIR}/config.h
 )
 
 CONFIGURE_FILE(
-  ${CMAKE_CURRENT_SOURCE_DIR}/lemon.pc.cmake
+  ${CMAKE_CURRENT_SOURCE_DIR}/lemon.pc.in
   ${CMAKE_CURRENT_BINARY_DIR}/lemon.pc
   @ONLY
Index: lemon/Makefile.am
===================================================================
--- lemon/Makefile.am	(revision 964)
+++ 	(revision )
@@ -1,151 +1,0 @@
-EXTRA_DIST += \
-	lemon/lemon.pc.in \
-	lemon/lemon.pc.cmake \
-	lemon/CMakeLists.txt \
-	lemon/config.h.cmake
-
-pkgconfig_DATA += lemon/lemon.pc
-
-lib_LTLIBRARIES += lemon/libemon.la
-
-lemon_libemon_la_SOURCES = \
-	lemon/arg_parser.cc \
-	lemon/base.cc \
-	lemon/color.cc \
-	lemon/lp_base.cc \
-	lemon/lp_skeleton.cc \
-	lemon/random.cc \
-	lemon/bits/windows.cc
-
-nodist_lemon_HEADERS = lemon/config.h	
-
-lemon_libemon_la_CXXFLAGS = \
-	$(AM_CXXFLAGS) \
-	$(GLPK_CFLAGS) \
-	$(CPLEX_CFLAGS) \
-	$(SOPLEX_CXXFLAGS) \
-	$(CLP_CXXFLAGS) \
-	$(CBC_CXXFLAGS)
-
-lemon_libemon_la_LDFLAGS = \
-	$(GLPK_LIBS) \
-	$(CPLEX_LIBS) \
-	$(SOPLEX_LIBS) \
-	$(CLP_LIBS) \
-	$(CBC_LIBS)
-
-if HAVE_GLPK
-lemon_libemon_la_SOURCES += lemon/glpk.cc
-endif
-
-if HAVE_CPLEX
-lemon_libemon_la_SOURCES += lemon/cplex.cc
-endif
-
-if HAVE_SOPLEX
-lemon_libemon_la_SOURCES += lemon/soplex.cc
-endif
-
-if HAVE_CLP
-lemon_libemon_la_SOURCES += lemon/clp.cc
-endif
-
-if HAVE_CBC
-lemon_libemon_la_SOURCES += lemon/cbc.cc
-endif
-
-lemon_HEADERS += \
-	lemon/adaptors.h \
-	lemon/arg_parser.h \
-	lemon/assert.h \
-	lemon/bellman_ford.h \
-	lemon/bfs.h \
-	lemon/bin_heap.h \
-	lemon/binomial_heap.h \
-	lemon/bucket_heap.h \
-	lemon/capacity_scaling.h \
-	lemon/cbc.h \
-	lemon/circulation.h \
-	lemon/clp.h \
-	lemon/color.h \
-	lemon/concept_check.h \
-	lemon/connectivity.h \
-	lemon/core.h \
-	lemon/cost_scaling.h \
-	lemon/counter.h \
-	lemon/cplex.h \
-	lemon/cycle_canceling.h \
-	lemon/dfs.h \
-	lemon/dheap.h \
-	lemon/dijkstra.h \
-	lemon/dim2.h \
-	lemon/dimacs.h \
-	lemon/edge_set.h \
-	lemon/elevator.h \
-	lemon/error.h \
-	lemon/euler.h \
-	lemon/fib_heap.h \
-	lemon/fractional_matching.h \
-	lemon/full_graph.h \
-	lemon/glpk.h \
-	lemon/gomory_hu.h \
-	lemon/graph_to_eps.h \
-	lemon/grid_graph.h \
-	lemon/hartmann_orlin_mmc.h \
-	lemon/howard_mmc.h \
-	lemon/hypercube_graph.h \
-	lemon/karp_mmc.h \
-	lemon/kruskal.h \
-	lemon/hao_orlin.h \
-	lemon/lgf_reader.h \
-	lemon/lgf_writer.h \
-	lemon/list_graph.h \
-	lemon/lp.h \
-	lemon/lp_base.h \
-	lemon/lp_skeleton.h \
-	lemon/maps.h \
-	lemon/matching.h \
-	lemon/math.h \
-	lemon/min_cost_arborescence.h \
-	lemon/nauty_reader.h \
-	lemon/network_simplex.h \
-	lemon/pairing_heap.h \
-	lemon/path.h \
-	lemon/planarity.h \
-	lemon/preflow.h \
-	lemon/quad_heap.h \
-	lemon/radix_heap.h \
-	lemon/radix_sort.h \
-	lemon/random.h \
-	lemon/smart_graph.h \
-	lemon/soplex.h \
-	lemon/static_graph.h \
-	lemon/suurballe.h \
-	lemon/time_measure.h \
-	lemon/tolerance.h \
-	lemon/unionfind.h \
-	lemon/bits/windows.h
-
-bits_HEADERS += \
-	lemon/bits/alteration_notifier.h \
-	lemon/bits/array_map.h \
-	lemon/bits/bezier.h \
-	lemon/bits/default_map.h \
-	lemon/bits/edge_set_extender.h \
-	lemon/bits/enable_if.h \
-	lemon/bits/graph_adaptor_extender.h \
-	lemon/bits/graph_extender.h \
-	lemon/bits/map_extender.h \
-	lemon/bits/path_dump.h \
-	lemon/bits/solver_bits.h \
-	lemon/bits/traits.h \
-	lemon/bits/variant.h \
-	lemon/bits/vector_map.h
-
-concept_HEADERS += \
-	lemon/concepts/digraph.h \
-	lemon/concepts/graph.h \
-	lemon/concepts/graph_components.h \
-	lemon/concepts/heap.h \
-	lemon/concepts/maps.h \
-	lemon/concepts/path.h
Index: lemon/bits/alteration_notifier.h
===================================================================
--- lemon/bits/alteration_notifier.h	(revision 440)
+++ lemon/bits/alteration_notifier.h	(revision 979)
@@ -24,4 +24,5 @@
 
 #include <lemon/core.h>
+#include <lemon/bits/lock.h>
 
 //\ingroup graphbits
@@ -252,5 +253,5 @@
     typedef std::list<ObserverBase*> Observers;
     Observers _observers;
-
+    lemon::bits::Lock _lock;
 
   public:
@@ -333,12 +334,16 @@
 
     void attach(ObserverBase& observer) {
+      _lock.lock();
       observer._index = _observers.insert(_observers.begin(), &observer);
       observer._notifier = this;
+      _lock.unlock();
     }
 
     void detach(ObserverBase& observer) {
+      _lock.lock();
       _observers.erase(observer._index);
       observer._index = _observers.end();
       observer._notifier = 0;
+      _lock.unlock();
     }
 
Index: lemon/bits/lock.h
===================================================================
--- lemon/bits/lock.h	(revision 979)
+++ lemon/bits/lock.h	(revision 979)
@@ -0,0 +1,65 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2012
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_BITS_LOCK_H
+#define LEMON_BITS_LOCK_H
+
+#include <lemon/config.h>
+#if defined(LEMON_USE_PTHREAD)
+#include <pthread.h>
+#elif defined(LEMON_USE_WIN32_THREADS)
+#include <lemon/bits/windows.h>
+#endif
+
+namespace lemon {
+  namespace bits {
+
+#if defined(LEMON_USE_PTHREAD)
+    class Lock {
+    public:
+      Lock() {
+	pthread_mutex_init(&_lock, 0);
+      }
+      ~Lock() {
+	pthread_mutex_destroy(&_lock);
+      }
+      void lock() {
+	pthread_mutex_lock(&_lock);
+      }
+      void unlock() {
+	pthread_mutex_unlock(&_lock);
+      }
+
+    private:
+      pthread_mutex_t _lock;
+    };
+#elif defined(LEMON_USE_WIN32_THREADS)
+    class Lock : public WinLock {};
+#else
+    class Lock {
+    public:
+      Lock() {}
+      ~Lock() {}
+      void lock() {}
+      void unlock() {}
+    };    
+#endif
+  }
+}
+
+#endif
Index: lemon/bits/windows.cc
===================================================================
--- lemon/bits/windows.cc	(revision 941)
+++ lemon/bits/windows.cc	(revision 1001)
@@ -131,4 +131,36 @@
 #endif
     }
+
+    WinLock::WinLock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = new CRITICAL_SECTION;
+      InitializeCriticalSection(lock);
+      _repr = lock;
+#else
+      _repr = 0; //Just to avoid 'unused variable' warning with clang
+#endif
+    }
+    
+    WinLock::~WinLock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = static_cast<CRITICAL_SECTION*>(_repr);
+      DeleteCriticalSection(lock);
+      delete lock;
+#endif
+    }
+
+    void WinLock::lock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = static_cast<CRITICAL_SECTION*>(_repr);
+      EnterCriticalSection(lock);
+#endif
+    }
+
+    void WinLock::unlock() {
+#ifdef WIN32
+      CRITICAL_SECTION *lock = static_cast<CRITICAL_SECTION*>(_repr);
+      LeaveCriticalSection(lock);
+#endif
+    }
   }
 }
Index: lemon/bits/windows.h
===================================================================
--- lemon/bits/windows.h	(revision 529)
+++ lemon/bits/windows.h	(revision 979)
@@ -29,4 +29,14 @@
     std::string getWinFormattedDate();
     int getWinRndSeed();
+
+    class WinLock {
+    public:
+      WinLock();
+      ~WinLock();
+      void lock();
+      void unlock();
+    private:
+      void *_repr;
+    };
   }
 }
Index: lemon/capacity_scaling.h
===================================================================
--- lemon/capacity_scaling.h	(revision 877)
+++ lemon/capacity_scaling.h	(revision 1004)
@@ -71,4 +71,9 @@
   /// solution method.
   ///
+  /// This algorithm is typically slower than \ref CostScaling and
+  /// \ref NetworkSimplex, but in special cases, it can be more
+  /// efficient than them.
+  /// (For more information, see \ref min_cost_flow_algs "the module page".)
+  ///
   /// Most of the parameters of the problem (except for the digraph)
   /// can be given using separate functions, and the algorithm can be
@@ -87,8 +92,9 @@
   /// consider to use the named template parameters instead.
   ///
-  /// \warning Both number types must be signed and all input data must
-  /// be integer.
-  /// \warning This algorithm does not support negative costs for such
-  /// arcs that have infinite upper bound.
+  /// \warning Both \c V and \c C must be signed number types.
+  /// \warning Capacity bounds and supply values must be integer, but
+  /// arc costs can be arbitrary real numbers.
+  /// \warning This algorithm does not support negative costs for
+  /// arcs having infinite upper bound.
 #ifdef DOXYGEN
   template <typename GR, typename V, typename C, typename TR>
@@ -423,5 +429,5 @@
     ///
     /// Using this function has the same effect as using \ref supplyMap()
-    /// with such a map in which \c k is assigned to \c s, \c -k is
+    /// with a map in which \c k is assigned to \c s, \c -k is
     /// assigned to \c t and all other nodes have zero supply value.
     ///
@@ -676,5 +682,6 @@
     }
 
-    /// \brief Return the flow map (the primal solution).
+    /// \brief Copy the flow values (the primal solution) into the
+    /// given map.
     ///
     /// This function copies the flow value on each arc into the given
@@ -700,5 +707,6 @@
     }
 
-    /// \brief Return the potential map (the dual solution).
+    /// \brief Copy the potential values (the dual solution) into the
+    /// given map.
     ///
     /// This function copies the potential (dual value) of each node
Index: lemon/config.h.cmake
===================================================================
--- lemon/config.h.cmake	(revision 678)
+++ 	(revision )
@@ -1,8 +1,0 @@
-#define LEMON_VERSION "@PROJECT_VERSION@"
-#cmakedefine LEMON_HAVE_LONG_LONG 1
-#cmakedefine LEMON_HAVE_LP 1
-#cmakedefine LEMON_HAVE_MIP 1
-#cmakedefine LEMON_HAVE_GLPK 1
-#cmakedefine LEMON_HAVE_CPLEX 1
-#cmakedefine LEMON_HAVE_CLP 1
-#cmakedefine LEMON_HAVE_CBC 1
Index: lemon/config.h.in
===================================================================
--- lemon/config.h.in	(revision 678)
+++ lemon/config.h.in	(revision 981)
@@ -1,26 +1,10 @@
-/* The version string */
-#undef LEMON_VERSION
-
-/* Define to 1 if you have long long */
-#undef LEMON_HAVE_LONG_LONG
-
-/* Define to 1 if you have any LP solver. */
-#undef LEMON_HAVE_LP
-
-/* Define to 1 if you have any MIP solver. */
-#undef LEMON_HAVE_MIP
-
-/* Define to 1 if you have CPLEX. */
-#undef LEMON_HAVE_CPLEX
-
-/* Define to 1 if you have GLPK. */
-#undef LEMON_HAVE_GLPK
-
-/* Define to 1 if you have SOPLEX */
-#undef LEMON_HAVE_SOPLEX
-
-/* Define to 1 if you have CLP */
-#undef LEMON_HAVE_CLP
-
-/* Define to 1 if you have CBC */
-#undef LEMON_HAVE_CBC
+#define LEMON_VERSION "@PROJECT_VERSION@"
+#cmakedefine LEMON_HAVE_LONG_LONG 1
+#cmakedefine LEMON_HAVE_LP 1
+#cmakedefine LEMON_HAVE_MIP 1
+#cmakedefine LEMON_HAVE_GLPK 1
+#cmakedefine LEMON_HAVE_CPLEX 1
+#cmakedefine LEMON_HAVE_CLP 1
+#cmakedefine LEMON_HAVE_CBC 1
+#cmakedefine LEMON_USE_PTHREAD 1
+#cmakedefine LEMON_USE_WIN32_THREADS 1
Index: lemon/core.h
===================================================================
--- lemon/core.h	(revision 998)
+++ lemon/core.h	(revision 1000)
@@ -447,4 +447,23 @@
 
   }
+
+  /// \brief Check whether a graph is undirected.
+  ///
+  /// This function returns \c true if the given graph is undirected.
+#ifdef DOXYGEN
+  template <typename GR>
+  bool undirected(const GR& g) { return false; }
+#else
+  template <typename GR>
+  typename enable_if<UndirectedTagIndicator<GR>, bool>::type
+  undirected(const GR&) {
+    return true;
+  }
+  template <typename GR>
+  typename disable_if<UndirectedTagIndicator<GR>, bool>::type
+  undirected(const GR&) {
+    return false;
+  }
+#endif
 
   /// \brief Class to copy a digraph.
Index: lemon/cost_scaling.h
===================================================================
--- lemon/cost_scaling.h	(revision 931)
+++ lemon/cost_scaling.h	(revision 1003)
@@ -98,4 +98,8 @@
   /// "preflow push-relabel" algorithm for the maximum flow problem.
   ///
+  /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
+  /// implementations available in LEMON for solving this problem.
+  /// (For more information, see \ref min_cost_flow_algs "the module page".)
+  ///
   /// Most of the parameters of the problem (except for the digraph)
   /// can be given using separate functions, and the algorithm can be
@@ -114,8 +118,9 @@
   /// consider to use the named template parameters instead.
   ///
-  /// \warning Both number types must be signed and all input data must
+  /// \warning Both \c V and \c C must be signed number types.
+  /// \warning All input data (capacities, supply values, and costs) must
   /// be integer.
-  /// \warning This algorithm does not support negative costs for such
-  /// arcs that have infinite upper bound.
+  /// \warning This algorithm does not support negative costs for
+  /// arcs having infinite upper bound.
   ///
   /// \note %CostScaling provides three different internal methods,
@@ -179,5 +184,5 @@
     /// relabel operation.
     /// By default, the so called \ref PARTIAL_AUGMENT
-    /// "Partial Augment-Relabel" method is used, which proved to be
+    /// "Partial Augment-Relabel" method is used, which turned out to be
     /// the most efficient and the most robust on various test inputs.
     /// However, the other methods can be selected using the \ref run()
@@ -234,5 +239,4 @@
     };
 
-    typedef StaticVectorMap<StaticDigraph::Node, LargeCost> LargeCostNodeMap;
     typedef StaticVectorMap<StaticDigraph::Arc, LargeCost> LargeCostArcMap;
 
@@ -285,12 +289,4 @@
     int _max_rank;
 
-    // Data for a StaticDigraph structure
-    typedef std::pair<int, int> IntPair;
-    StaticDigraph _sgr;
-    std::vector<IntPair> _arc_vec;
-    std::vector<LargeCost> _cost_vec;
-    LargeCostArcMap _cost_map;
-    LargeCostNodeMap _pi_map;
-
   public:
 
@@ -339,5 +335,4 @@
     CostScaling(const GR& graph) :
       _graph(graph), _node_id(graph), _arc_idf(graph), _arc_idb(graph),
-      _cost_map(_cost_vec), _pi_map(_pi),
       INF(std::numeric_limits<Value>::has_infinity ?
           std::numeric_limits<Value>::infinity() :
@@ -448,5 +443,5 @@
     ///
     /// Using this function has the same effect as using \ref supplyMap()
-    /// with such a map in which \c k is assigned to \c s, \c -k is
+    /// with a map in which \c k is assigned to \c s, \c -k is
     /// assigned to \c t and all other nodes have zero supply value.
     ///
@@ -494,5 +489,5 @@
     /// \param method The internal method that will be used in the
     /// algorithm. For more information, see \ref Method.
-    /// \param factor The cost scaling factor. It must be larger than one.
+    /// \param factor The cost scaling factor. It must be at least two.
     ///
     /// \return \c INFEASIBLE if no feasible flow exists,
@@ -508,5 +503,6 @@
     /// \see ProblemType, Method
     /// \see resetParams(), reset()
-    ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 8) {
+    ProblemType run(Method method = PARTIAL_AUGMENT, int factor = 16) {
+      LEMON_ASSERT(factor >= 2, "The scaling factor must be at least 2");
       _alpha = factor;
       ProblemType pt = init();
@@ -572,16 +568,23 @@
     }
 
-    /// \brief Reset all the parameters that have been given before.
-    ///
-    /// This function resets all the paramaters that have been given
-    /// before using functions \ref lowerMap(), \ref upperMap(),
-    /// \ref costMap(), \ref supplyMap(), \ref stSupply().
-    ///
-    /// It is useful for multiple run() calls. If this function is not
-    /// used, all the parameters given before are kept for the next
-    /// \ref run() call.
-    /// However, the underlying digraph must not be modified after this
-    /// class have been constructed, since it copies and extends the graph.
+    /// \brief Reset the internal data structures and all the parameters
+    /// that have been given before.
+    ///
+    /// This function resets the internal data structures and all the
+    /// parameters that have been given before using functions \ref lowerMap(),
+    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply().
+    ///
+    /// It is useful for multiple \ref run() calls. By default, all the given
+    /// parameters are kept for the next \ref run() call, unless
+    /// \ref resetParams() or \ref reset() is used.
+    /// If the underlying digraph was also modified after the construction
+    /// of the class or the last \ref reset() call, then the \ref reset()
+    /// function must be used, otherwise \ref resetParams() is sufficient.
+    ///
+    /// See \ref resetParams() for examples.
+    ///
     /// \return <tt>(*this)</tt>
+    ///
+    /// \see resetParams(), run()
     CostScaling& reset() {
       // Resize vectors
@@ -608,7 +611,4 @@
       _excess.resize(_res_node_num);
       _next_out.resize(_res_node_num);
-
-      _arc_vec.reserve(_res_arc_num);
-      _cost_vec.reserve(_res_arc_num);
 
       // Copy the graph
@@ -706,5 +706,6 @@
     }
 
-    /// \brief Return the flow map (the primal solution).
+    /// \brief Copy the flow values (the primal solution) into the
+    /// given map.
     ///
     /// This function copies the flow value on each arc into the given
@@ -730,5 +731,6 @@
     }
 
-    /// \brief Return the potential map (the dual solution).
+    /// \brief Copy the potential values (the dual solution) into the
+    /// given map.
     ///
     /// This function copies the potential (dual value) of each node
@@ -887,12 +889,4 @@
       }
 
-      return OPTIMAL;
-    }
-
-    // Execute the algorithm and transform the results
-    void start(Method method) {
-      // Maximum path length for partial augment
-      const int MAX_PATH_LENGTH = 4;
-
       // Initialize data structures for buckets
       _max_rank = _alpha * _res_node_num;
@@ -902,5 +896,11 @@
       _rank.resize(_res_node_num + 1);
 
-      // Execute the algorithm
+      return OPTIMAL;
+    }
+
+    // Execute the algorithm and transform the results
+    void start(Method method) {
+      const int MAX_PARTIAL_PATH_LENGTH = 4;
+
       switch (method) {
         case PUSH:
@@ -911,24 +911,65 @@
           break;
         case PARTIAL_AUGMENT:
-          startAugment(MAX_PATH_LENGTH);
+          startAugment(MAX_PARTIAL_PATH_LENGTH);
           break;
       }
 
-      // Compute node potentials for the original costs
-      _arc_vec.clear();
-      _cost_vec.clear();
-      for (int j = 0; j != _res_arc_num; ++j) {
-        if (_res_cap[j] > 0) {
-          _arc_vec.push_back(IntPair(_source[j], _target[j]));
-          _cost_vec.push_back(_scost[j]);
-        }
-      }
-      _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
-      typename BellmanFord<StaticDigraph, LargeCostArcMap>
-        ::template SetDistMap<LargeCostNodeMap>::Create bf(_sgr, _cost_map);
-      bf.distMap(_pi_map);
-      bf.init(0);
-      bf.start();
+      // Compute node potentials (dual solution)
+      for (int i = 0; i != _res_node_num; ++i) {
+        _pi[i] = static_cast<Cost>(_pi[i] / (_res_node_num * _alpha));
+      }
+      bool optimal = true;
+      for (int i = 0; optimal && i != _res_node_num; ++i) {
+        LargeCost pi_i = _pi[i];
+        int last_out = _first_out[i+1];
+        for (int j = _first_out[i]; j != last_out; ++j) {
+          if (_res_cap[j] > 0 && _scost[j] + pi_i - _pi[_target[j]] < 0) {
+            optimal = false;
+            break;
+          }
+        }
+      }
+
+      if (!optimal) {
+        // Compute node potentials for the original costs with BellmanFord
+        // (if it is necessary)
+        typedef std::pair<int, int> IntPair;
+        StaticDigraph sgr;
+        std::vector<IntPair> arc_vec;
+        std::vector<LargeCost> cost_vec;
+        LargeCostArcMap cost_map(cost_vec);
+
+        arc_vec.clear();
+        cost_vec.clear();
+        for (int j = 0; j != _res_arc_num; ++j) {
+          if (_res_cap[j] > 0) {
+            int u = _source[j], v = _target[j];
+            arc_vec.push_back(IntPair(u, v));
+            cost_vec.push_back(_scost[j] + _pi[u] - _pi[v]);
+          }
+        }
+        sgr.build(_res_node_num, arc_vec.begin(), arc_vec.end());
+
+        typename BellmanFord<StaticDigraph, LargeCostArcMap>::Create
+          bf(sgr, cost_map);
+        bf.init(0);
+        bf.start();
+
+        for (int i = 0; i != _res_node_num; ++i) {
+          _pi[i] += bf.dist(sgr.node(i));
+        }
+      }
+
+      // Shift potentials to meet the requirements of the GEQ type
+      // optimality conditions
+      LargeCost max_pot = _pi[_root];
+      for (int i = 0; i != _res_node_num; ++i) {
+        if (_pi[i] > max_pot) max_pot = _pi[i];
+      }
+      if (max_pot != 0) {
+        for (int i = 0; i != _res_node_num; ++i) {
+          _pi[i] -= max_pot;
+        }
+      }
 
       // Handle non-zero lower bounds
@@ -948,11 +989,13 @@
         LargeCost pi_u = _pi[u];
         for (int a = _first_out[u]; a != last_out; ++a) {
-          int v = _target[a];
-          if (_res_cap[a] > 0 && _cost[a] + pi_u - _pi[v] < 0) {
-            Value delta = _res_cap[a];
-            _excess[u] -= delta;
-            _excess[v] += delta;
-            _res_cap[a] = 0;
-            _res_cap[_reverse[a]] += delta;
+          Value delta = _res_cap[a];
+          if (delta > 0) {
+            int v = _target[a];
+            if (_cost[a] + pi_u - _pi[v] < 0) {
+              _excess[u] -= delta;
+              _excess[v] += delta;
+              _res_cap[a] = 0;
+              _res_cap[_reverse[a]] += delta;
+            }
           }
         }
@@ -970,33 +1013,232 @@
     }
 
-    // Early termination heuristic
-    bool earlyTermination() {
-      const double EARLY_TERM_FACTOR = 3.0;
-
-      // Build a static residual graph
-      _arc_vec.clear();
-      _cost_vec.clear();
-      for (int j = 0; j != _res_arc_num; ++j) {
-        if (_res_cap[j] > 0) {
-          _arc_vec.push_back(IntPair(_source[j], _target[j]));
-          _cost_vec.push_back(_cost[j] + 1);
-        }
-      }
-      _sgr.build(_res_node_num, _arc_vec.begin(), _arc_vec.end());
-
-      // Run Bellman-Ford algorithm to check if the current flow is optimal
-      BellmanFord<StaticDigraph, LargeCostArcMap> bf(_sgr, _cost_map);
-      bf.init(0);
-      bool done = false;
-      int K = int(EARLY_TERM_FACTOR * std::sqrt(double(_res_node_num)));
-      for (int i = 0; i < K && !done; ++i) {
-        done = bf.processNextWeakRound();
-      }
-      return done;
+    // Price (potential) refinement heuristic
+    bool priceRefinement() {
+
+      // Stack for storing the topological order
+      IntVector stack(_res_node_num);
+      int stack_top;
+
+      // Perform phases
+      while (topologicalSort(stack, stack_top)) {
+
+        // Compute node ranks in the acyclic admissible network and
+        // store the nodes in buckets
+        for (int i = 0; i != _res_node_num; ++i) {
+          _rank[i] = 0;
+        }
+        const int bucket_end = _root + 1;
+        for (int r = 0; r != _max_rank; ++r) {
+          _buckets[r] = bucket_end;
+        }
+        int top_rank = 0;
+        for ( ; stack_top >= 0; --stack_top) {
+          int u = stack[stack_top], v;
+          int rank_u = _rank[u];
+
+          LargeCost rc, pi_u = _pi[u];
+          int last_out = _first_out[u+1];
+          for (int a = _first_out[u]; a != last_out; ++a) {
+            if (_res_cap[a] > 0) {
+              v = _target[a];
+              rc = _cost[a] + pi_u - _pi[v];
+              if (rc < 0) {
+                LargeCost nrc = static_cast<LargeCost>((-rc - 0.5) / _epsilon);
+                if (nrc < LargeCost(_max_rank)) {
+                  int new_rank_v = rank_u + static_cast<int>(nrc);
+                  if (new_rank_v > _rank[v]) {
+                    _rank[v] = new_rank_v;
+                  }
+                }
+              }
+            }
+          }
+
+          if (rank_u > 0) {
+            top_rank = std::max(top_rank, rank_u);
+            int bfirst = _buckets[rank_u];
+            _bucket_next[u] = bfirst;
+            _bucket_prev[bfirst] = u;
+            _buckets[rank_u] = u;
+          }
+        }
+
+        // Check if the current flow is epsilon-optimal
+        if (top_rank == 0) {
+          return true;
+        }
+
+        // Process buckets in top-down order
+        for (int rank = top_rank; rank > 0; --rank) {
+          while (_buckets[rank] != bucket_end) {
+            // Remove the first node from the current bucket
+            int u = _buckets[rank];
+            _buckets[rank] = _bucket_next[u];
+
+            // Search the outgoing arcs of u
+            LargeCost rc, pi_u = _pi[u];
+            int last_out = _first_out[u+1];
+            int v, old_rank_v, new_rank_v;
+            for (int a = _first_out[u]; a != last_out; ++a) {
+              if (_res_cap[a] > 0) {
+                v = _target[a];
+                old_rank_v = _rank[v];
+
+                if (old_rank_v < rank) {
+
+                  // Compute the new rank of node v
+                  rc = _cost[a] + pi_u - _pi[v];
+                  if (rc < 0) {
+                    new_rank_v = rank;
+                  } else {
+                    LargeCost nrc = rc / _epsilon;
+                    new_rank_v = 0;
+                    if (nrc < LargeCost(_max_rank)) {
+                      new_rank_v = rank - 1 - static_cast<int>(nrc);
+                    }
+                  }
+
+                  // Change the rank of node v
+                  if (new_rank_v > old_rank_v) {
+                    _rank[v] = new_rank_v;
+
+                    // Remove v from its old bucket
+                    if (old_rank_v > 0) {
+                      if (_buckets[old_rank_v] == v) {
+                        _buckets[old_rank_v] = _bucket_next[v];
+                      } else {
+                        int pv = _bucket_prev[v], nv = _bucket_next[v];
+                        _bucket_next[pv] = nv;
+                        _bucket_prev[nv] = pv;
+                      }
+                    }
+
+                    // Insert v into its new bucket
+                    int nv = _buckets[new_rank_v];
+                    _bucket_next[v] = nv;
+                    _bucket_prev[nv] = v;
+                    _buckets[new_rank_v] = v;
+                  }
+                }
+              }
+            }
+
+            // Refine potential of node u
+            _pi[u] -= rank * _epsilon;
+          }
+        }
+
+      }
+
+      return false;
+    }
+
+    // Find and cancel cycles in the admissible network and
+    // determine topological order using DFS
+    bool topologicalSort(IntVector &stack, int &stack_top) {
+      const int MAX_CYCLE_CANCEL = 1;
+
+      BoolVector reached(_res_node_num, false);
+      BoolVector processed(_res_node_num, false);
+      IntVector pred(_res_node_num);
+      for (int i = 0; i != _res_node_num; ++i) {
+        _next_out[i] = _first_out[i];
+      }
+      stack_top = -1;
+
+      int cycle_cnt = 0;
+      for (int start = 0; start != _res_node_num; ++start) {
+        if (reached[start]) continue;
+
+        // Start DFS search from this start node
+        pred[start] = -1;
+        int tip = start, v;
+        while (true) {
+          // Check the outgoing arcs of the current tip node
+          reached[tip] = true;
+          LargeCost pi_tip = _pi[tip];
+          int a, last_out = _first_out[tip+1];
+          for (a = _next_out[tip]; a != last_out; ++a) {
+            if (_res_cap[a] > 0) {
+              v = _target[a];
+              if (_cost[a] + pi_tip - _pi[v] < 0) {
+                if (!reached[v]) {
+                  // A new node is reached
+                  reached[v] = true;
+                  pred[v] = tip;
+                  _next_out[tip] = a;
+                  tip = v;
+                  a = _next_out[tip];
+                  last_out = _first_out[tip+1];
+                  break;
+                }
+                else if (!processed[v]) {
+                  // A cycle is found
+                  ++cycle_cnt;
+                  _next_out[tip] = a;
+
+                  // Find the minimum residual capacity along the cycle
+                  Value d, delta = _res_cap[a];
+                  int u, delta_node = tip;
+                  for (u = tip; u != v; ) {
+                    u = pred[u];
+                    d = _res_cap[_next_out[u]];
+                    if (d <= delta) {
+                      delta = d;
+                      delta_node = u;
+                    }
+                  }
+
+                  // Augment along the cycle
+                  _res_cap[a] -= delta;
+                  _res_cap[_reverse[a]] += delta;
+                  for (u = tip; u != v; ) {
+                    u = pred[u];
+                    int ca = _next_out[u];
+                    _res_cap[ca] -= delta;
+                    _res_cap[_reverse[ca]] += delta;
+                  }
+
+                  // Check the maximum number of cycle canceling
+                  if (cycle_cnt >= MAX_CYCLE_CANCEL) {
+                    return false;
+                  }
+
+                  // Roll back search to delta_node
+                  if (delta_node != tip) {
+                    for (u = tip; u != delta_node; u = pred[u]) {
+                      reached[u] = false;
+                    }
+                    tip = delta_node;
+                    a = _next_out[tip] + 1;
+                    last_out = _first_out[tip+1];
+                    break;
+                  }
+                }
+              }
+            }
+          }
+
+          // Step back to the previous node
+          if (a == last_out) {
+            processed[tip] = true;
+            stack[++stack_top] = tip;
+            tip = pred[tip];
+            if (tip < 0) {
+              // Finish DFS from the current start node
+              break;
+            }
+            ++_next_out[tip];
+          }
+        }
+
+      }
+
+      return (cycle_cnt == 0);
     }
 
     // Global potential update heuristic
     void globalUpdate() {
-      int bucket_end = _root + 1;
+      const int bucket_end = _root + 1;
 
       // Initialize buckets
@@ -1005,10 +1247,11 @@
       }
       Value total_excess = 0;
+      int b0 = bucket_end;
       for (int i = 0; i != _res_node_num; ++i) {
         if (_excess[i] < 0) {
           _rank[i] = 0;
-          _bucket_next[i] = _buckets[0];
-          _bucket_prev[_buckets[0]] = i;
-          _buckets[0] = i;
+          _bucket_next[i] = b0;
+          _bucket_prev[b0] = i;
+          b0 = i;
         } else {
           total_excess += _excess[i];
@@ -1017,4 +1260,5 @@
       }
       if (total_excess == 0) return;
+      _buckets[0] = b0;
 
       // Search the buckets
@@ -1038,6 +1282,7 @@
                 LargeCost nrc = (_cost[ra] + _pi[v] - pi_u) / _epsilon;
                 int new_rank_v = old_rank_v;
-                if (nrc < LargeCost(_max_rank))
-                  new_rank_v = r + 1 + int(nrc);
+                if (nrc < LargeCost(_max_rank)) {
+                  new_rank_v = r + 1 + static_cast<int>(nrc);
+                }
 
                 // Change the rank of v
@@ -1051,12 +1296,14 @@
                       _buckets[old_rank_v] = _bucket_next[v];
                     } else {
-                      _bucket_next[_bucket_prev[v]] = _bucket_next[v];
-                      _bucket_prev[_bucket_next[v]] = _bucket_prev[v];
+                      int pv = _bucket_prev[v], nv = _bucket_next[v];
+                      _bucket_next[pv] = nv;
+                      _bucket_prev[nv] = pv;
                     }
                   }
 
-                  // Insert v to its new bucket
-                  _bucket_next[v] = _buckets[new_rank_v];
-                  _bucket_prev[_buckets[new_rank_v]] = v;
+                  // Insert v into its new bucket
+                  int nv = _buckets[new_rank_v];
+                  _bucket_next[v] = nv;
+                  _bucket_prev[nv] = v;
                   _buckets[new_rank_v] = v;
                 }
@@ -1087,21 +1334,23 @@
     void startAugment(int max_length) {
 +      // Parameters for heuristics
-      const int EARLY_TERM_EPSILON_LIMIT = 1000;
-      const double GLOBAL_UPDATE_FACTOR = 3.0;
-
-      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+      const int PRICE_REFINEMENT_LIMIT = 2;
+      const double GLOBAL_UPDATE_FACTOR = 1.0;
+      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
         (_res_node_num + _sup_node_num * _sup_node_num));
-      int next_update_limit = global_update_freq;
-
+      int next_global_update_limit = global_update_skip;
+
+      // Perform cost scaling phases
+      IntVector path;
+      BoolVector path_arc(_res_arc_num, false);
       int relabel_cnt = 0;
-
-      // Perform cost scaling phases
-      std::vector<int> path;
+      int eps_phase_cnt = 0;
       for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                                         1 : _epsilon / _alpha )
       {
-        // Early termination heuristic
-        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
-          if (earlyTermination()) break;
+        ++eps_phase_cnt;
+
+        // Price refinement heuristic
+        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
+          if (priceRefinement()) continue;
         }
 
@@ -1120,30 +1369,43 @@
 
           // Find an augmenting path from the start node
-          path.clear();
           int tip = start;
-          while (_excess[tip] >= 0 && int(path.size()) < max_length) {
+          while (int(path.size()) < max_length && _excess[tip] >= 0) {
             int u;
-            LargeCost min_red_cost, rc, pi_tip = _pi[tip];
+            LargeCost rc, min_red_cost = std::numeric_limits<LargeCost>::max();
+            LargeCost pi_tip = _pi[tip];
             int last_out = _first_out[tip+1];
             for (int a = _next_out[tip]; a != last_out; ++a) {
-              u = _target[a];
-              if (_res_cap[a] > 0 && _cost[a] + pi_tip - _pi[u] < 0) {
-                path.push_back(a);
-                _next_out[tip] = a;
-                tip = u;
-                goto next_step;
+              if (_res_cap[a] > 0) {
+                u = _target[a];
+                rc = _cost[a] + pi_tip - _pi[u];
+                if (rc < 0) {
+                  path.push_back(a);
+                  _next_out[tip] = a;
+                  if (path_arc[a]) {
+                    goto augment;   // a cycle is found, stop path search
+                  }
+                  tip = u;
+                  path_arc[a] = true;
+                  goto next_step;
+                }
+                else if (rc < min_red_cost) {
+                  min_red_cost = rc;
+                }
               }
             }
 
             // Relabel tip node
-            min_red_cost = std::numeric_limits<LargeCost>::max();
             if (tip != start) {
               int ra = _reverse[path.back()];
-              min_red_cost = _cost[ra] + pi_tip - _pi[_target[ra]];
+              min_red_cost =
+                std::min(min_red_cost, _cost[ra] + pi_tip - _pi[_target[ra]]);
             }
+            last_out = _next_out[tip];
             for (int a = _first_out[tip]; a != last_out; ++a) {
-              rc = _cost[a] + pi_tip - _pi[_target[a]];
-              if (_res_cap[a] > 0 && rc < min_red_cost) {
-                min_red_cost = rc;
+              if (_res_cap[a] > 0) {
+                rc = _cost[a] + pi_tip - _pi[_target[a]];
+                if (rc < min_red_cost) {
+                  min_red_cost = rc;
+                }
               }
             }
@@ -1154,5 +1416,7 @@
             // Step back
             if (tip != start) {
-              tip = _source[path.back()];
+              int pa = path.back();
+              path_arc[pa] = false;
+              tip = _source[pa];
               path.pop_back();
             }
@@ -1162,4 +1426,5 @@
 
           // Augment along the found path (as much flow as possible)
+        augment:
           Value delta;
           int pa, u, v = start;
@@ -1168,4 +1433,5 @@
             u = v;
             v = _target[pa];
+            path_arc[pa] = false;
             delta = std::min(_res_cap[pa], _excess[u]);
             _res_cap[pa] -= delta;
@@ -1173,15 +1439,19 @@
             _excess[u] -= delta;
             _excess[v] += delta;
-            if (_excess[v] > 0 && _excess[v] <= delta)
+            if (_excess[v] > 0 && _excess[v] <= delta) {
               _active_nodes.push_back(v);
-          }
+            }
+          }
+          path.clear();
 
           // Global update heuristic
-          if (relabel_cnt >= next_update_limit) {
+          if (relabel_cnt >= next_global_update_limit) {
             globalUpdate();
-            next_update_limit += global_update_freq;
-          }
-        }
-      }
+            next_global_update_limit += global_update_skip;
+          }
+        }
+
+      }
+
     }
 
@@ -1189,22 +1459,24 @@
     void startPush() {
 +      // Parameters for heuristics
-      const int EARLY_TERM_EPSILON_LIMIT = 1000;
+      const int PRICE_REFINEMENT_LIMIT = 2;
       const double GLOBAL_UPDATE_FACTOR = 2.0;
 
-      const int global_update_freq = int(GLOBAL_UPDATE_FACTOR *
+      const int global_update_skip = static_cast<int>(GLOBAL_UPDATE_FACTOR *
         (_res_node_num + _sup_node_num * _sup_node_num));
-      int next_update_limit = global_update_freq;
-
-      int relabel_cnt = 0;
+      int next_global_update_limit = global_update_skip;
 
       // Perform cost scaling phases
       BoolVector hyper(_res_node_num, false);
       LargeCostVector hyper_cost(_res_node_num);
+      int relabel_cnt = 0;
+      int eps_phase_cnt = 0;
       for ( ; _epsilon >= 1; _epsilon = _epsilon < _alpha && _epsilon > 1 ?
                                         1 : _epsilon / _alpha )
       {
-        // Early termination heuristic
-        if (_epsilon <= EARLY_TERM_EPSILON_LIMIT) {
-          if (earlyTermination()) break;
+        ++eps_phase_cnt;
+
+        // Price refinement heuristic
+        if (eps_phase_cnt >= PRICE_REFINEMENT_LIMIT) {
+          if (priceRefinement()) continue;
         }
 
@@ -1278,7 +1550,9 @@
                std::numeric_limits<LargeCost>::max();
             for (int a = _first_out[n]; a != last_out; ++a) {
-              rc = _cost[a] + pi_n - _pi[_target[a]];
-              if (_res_cap[a] > 0 && rc < min_red_cost) {
-                min_red_cost = rc;
+              if (_res_cap[a] > 0) {
+                rc = _cost[a] + pi_n - _pi[_target[a]];
+                if (rc < min_red_cost) {
+                  min_red_cost = rc;
+                }
               }
             }
@@ -1298,9 +1572,9 @@
 
           // Global update heuristic
-          if (relabel_cnt >= next_update_limit) {
+          if (relabel_cnt >= next_global_update_limit) {
             globalUpdate();
             for (int u = 0; u != _res_node_num; ++u)
               hyper[u] = false;
-            next_update_limit += global_update_freq;
+            next_global_update_limit += global_update_skip;
           }
         }
Index: lemon/cycle_canceling.h
===================================================================
--- lemon/cycle_canceling.h	(revision 877)
+++ lemon/cycle_canceling.h	(revision 1003)
@@ -49,9 +49,10 @@
   /// \ref amo93networkflows, \ref klein67primal,
   /// \ref goldberg89cyclecanceling.
-  /// The most efficent one (both theoretically and practically)
-  /// is the \ref CANCEL_AND_TIGHTEN "Cancel and Tighten" algorithm,
-  /// thus it is the default method.
-  /// It is strongly polynomial, but in practice, it is typically much
-  /// slower than the scaling algorithms and NetworkSimplex.
 +  /// The most efficient one is the \ref CANCEL_AND_TIGHTEN
+  /// "Cancel-and-Tighten" algorithm, thus it is the default method.
+  /// It runs in strongly polynomial time, but in practice, it is typically
+  /// orders of magnitude slower than the scaling algorithms and
+  /// \ref NetworkSimplex.
+  /// (For more information, see \ref min_cost_flow_algs "the module page".)
   ///
   /// Most of the parameters of the problem (except for the digraph)
@@ -66,8 +67,9 @@
   /// algorithm. By default, it is the same as \c V.
   ///
-  /// \warning Both number types must be signed and all input data must
+  /// \warning Both \c V and \c C must be signed number types.
+  /// \warning All input data (capacities, supply values, and costs) must
   /// be integer.
-  /// \warning This algorithm does not support negative costs for such
-  /// arcs that have infinite upper bound.
+  /// \warning This algorithm does not support negative costs for
+  /// arcs having infinite upper bound.
   ///
   /// \note For more information about the three available methods,
@@ -116,13 +118,14 @@
     ///
     /// \ref CycleCanceling provides three different cycle-canceling
-    /// methods. By default, \ref CANCEL_AND_TIGHTEN "Cancel and Tighten"
-    /// is used, which proved to be the most efficient and the most robust
-    /// on various test inputs.
+    /// methods. By default, \ref CANCEL_AND_TIGHTEN "Cancel-and-Tighten"
+    /// is used, which is by far the most efficient and the most robust.
     /// However, the other methods can be selected using the \ref run()
     /// function with the proper parameter.
     enum Method {
       /// A simple cycle-canceling method, which uses the
-      /// \ref BellmanFord "Bellman-Ford" algorithm with limited iteration
-      /// number for detecting negative cycles in the residual network.
+      /// \ref BellmanFord "Bellman-Ford" algorithm for detecting negative
+      /// cycles in the residual network.
+      /// The number of Bellman-Ford iterations is bounded by a successively
+      /// increased limit.
       SIMPLE_CYCLE_CANCELING,
       /// The "Minimum Mean Cycle-Canceling" algorithm, which is a
@@ -130,11 +133,11 @@
       /// \ref goldberg89cyclecanceling. It improves along a
       /// \ref min_mean_cycle "minimum mean cycle" in each iteration.
-      /// Its running time complexity is O(n<sup>2</sup>m<sup>3</sup>log(n)).
+      /// Its running time complexity is O(n<sup>2</sup>e<sup>3</sup>log(n)).
       MINIMUM_MEAN_CYCLE_CANCELING,
-      /// The "Cancel And Tighten" algorithm, which can be viewed as an
+      /// The "Cancel-and-Tighten" algorithm, which can be viewed as an
       /// improved version of the previous method
       /// \ref goldberg89cyclecanceling.
       /// It is faster both in theory and in practice, its running time
-      /// complexity is O(n<sup>2</sup>m<sup>2</sup>log(n)).
+      /// complexity is O(n<sup>2</sup>e<sup>2</sup>log(n)).
       CANCEL_AND_TIGHTEN
     };
@@ -350,5 +353,5 @@
     ///
     /// Using this function has the same effect as using \ref supplyMap()
-    /// with such a map in which \c k is assigned to \c s, \c -k is
+    /// with a map in which \c k is assigned to \c s, \c -k is
     /// assigned to \c t and all other nodes have zero supply value.
     ///
@@ -611,5 +614,6 @@
     }
 
-    /// \brief Return the flow map (the primal solution).
+    /// \brief Copy the flow values (the primal solution) into the
+    /// given map.
     ///
     /// This function copies the flow value on each arc into the given
@@ -635,5 +639,6 @@
     }
 
-    /// \brief Return the potential map (the dual solution).
+    /// \brief Copy the potential values (the dual solution) into the
+    /// given map.
     ///
     /// This function copies the potential (dual value) of each node
@@ -955,5 +960,5 @@
     }
 
-    // Execute the "Cancel And Tighten" method
+    // Execute the "Cancel-and-Tighten" method
     void startCancelAndTighten() {
       // Constants for the min mean cycle computations
Index: lemon/euler.h
===================================================================
--- lemon/euler.h	(revision 877)
+++ lemon/euler.h	(revision 919)
@@ -37,5 +37,5 @@
   ///Euler tour iterator for digraphs.
 
-  /// \ingroup graph_prop
+  /// \ingroup graph_properties
   ///This iterator provides an Euler tour (Eulerian circuit) of a \e directed
   ///graph (if there exists) and it converts to the \c Arc type of the digraph.
Index: lemon/grosso_locatelli_pullan_mc.h
===================================================================
--- lemon/grosso_locatelli_pullan_mc.h	(revision 918)
+++ lemon/grosso_locatelli_pullan_mc.h	(revision 918)
@@ -0,0 +1,840 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_GROSSO_LOCATELLI_PULLAN_MC_H
+#define LEMON_GROSSO_LOCATELLI_PULLAN_MC_H
+
+/// \ingroup approx_algs
+///
+/// \file
+/// \brief The iterated local search algorithm of Grosso, Locatelli, and Pullan
+/// for the maximum clique problem
+
+#include <vector>
+#include <limits>
+#include <lemon/core.h>
+#include <lemon/random.h>
+
+namespace lemon {
+
+  /// \addtogroup approx_algs
+  /// @{
+
+  /// \brief Implementation of the iterated local search algorithm of Grosso,
+  /// Locatelli, and Pullan for the maximum clique problem
+  ///
+  /// \ref GrossoLocatelliPullanMc implements the iterated local search
+  /// algorithm of Grosso, Locatelli, and Pullan for solving the \e maximum
+  /// \e clique \e problem \ref grosso08maxclique.
 +  /// The problem is to find the largest complete subgraph (\e clique) in an
+  /// undirected graph, i.e., the largest set of nodes where each
+  /// pair of nodes is connected.
+  ///
+  /// This class provides a simple but highly efficient and robust heuristic
+  /// method that quickly finds a quite large clique, but not necessarily the
+  /// largest one.
+  /// The algorithm performs a certain number of iterations to find several
+  /// cliques and selects the largest one among them. Various limits can be
+  /// specified to control the running time and the effectiveness of the
+  /// search process.
+  ///
+  /// \tparam GR The undirected graph type the algorithm runs on.
+  ///
+  /// \note %GrossoLocatelliPullanMc provides three different node selection
+  /// rules, from which the most powerful one is used by default.
+  /// For more information, see \ref SelectionRule.
+  template <typename GR>
+  class GrossoLocatelliPullanMc
+  {
+  public:
+
+    /// \brief Constants for specifying the node selection rule.
+    ///
+    /// Enum type containing constants for specifying the node selection rule
+    /// for the \ref run() function.
+    ///
+    /// During the algorithm, nodes are selected for addition to the current
+    /// clique according to the applied rule.
+    /// In general, the PENALTY_BASED rule turned out to be the most powerful
+    /// and the most robust, thus it is the default option.
+    /// However, another selection rule can be specified using the \ref run()
+    /// function with the proper parameter.
+    enum SelectionRule {
+
+      /// A node is selected randomly without any evaluation at each step.
+      RANDOM,
+
+      /// A node of maximum degree is selected randomly at each step.
+      DEGREE_BASED,
+
+      /// A node of minimum penalty is selected randomly at each step.
+      /// The node penalties are updated adaptively after each stage of the
+      /// search process.
+      PENALTY_BASED
+    };
+
+    /// \brief Constants for the causes of search termination.
+    ///
+    /// Enum type containing constants for the different causes of search
+    /// termination. The \ref run() function returns one of these values.
+    enum TerminationCause {
+
+      /// The iteration count limit is reached.
+      ITERATION_LIMIT,
+
+      /// The step count limit is reached.
+      STEP_LIMIT,
+
+      /// The clique size limit is reached.
+      SIZE_LIMIT
+    };
+
+  private:
+
+    TEMPLATE_GRAPH_TYPEDEFS(GR);
+
+    typedef std::vector<int> IntVector;
+    typedef std::vector<char> BoolVector;
+    typedef std::vector<BoolVector> BoolMatrix;
+    // Note: vector<char> is used instead of vector<bool> for efficiency reasons
+
+    // The underlying graph
+    const GR &_graph;
+    IntNodeMap _id;
+
+    // Internal matrix representation of the graph
+    BoolMatrix _gr;
+    int _n;
+    
+    // Search options
+    bool _delta_based_restart;
+    int _restart_delta_limit;
+ 
+    // Search limits
+    int _iteration_limit;
+    int _step_limit;
+    int _size_limit;
+
+    // The current clique
+    BoolVector _clique;
+    int _size;
+
+    // The best clique found so far
+    BoolVector _best_clique;
+    int _best_size;
+
+    // The "distances" of the nodes from the current clique.
+    // _delta[u] is the number of nodes in the clique that are
+    // not connected with u.
+    IntVector _delta;
+
+    // The current tabu set
+    BoolVector _tabu;
+
+    // Random number generator
+    Random _rnd;
+
+  private:
+
+    // Implementation of the RANDOM node selection rule.
+    class RandomSelectionRule
+    {
+    private:
+
+      // References to the algorithm instance
+      const BoolVector &_clique;
+      const IntVector  &_delta;
+      const BoolVector &_tabu;
+      Random &_rnd;
+
+      // Pivot rule data
+      int _n;
+
+    public:
+
+      // Constructor
+      RandomSelectionRule(GrossoLocatelliPullanMc &mc) :
+        _clique(mc._clique), _delta(mc._delta), _tabu(mc._tabu),
+        _rnd(mc._rnd), _n(mc._n)
+      {}
+
+      // Return a node index for a feasible add move or -1 if no one exists
+      int nextFeasibleAddNode() const {
+        int start_node = _rnd[_n];
+        for (int i = start_node; i != _n; i++) {
+          if (_delta[i] == 0 && !_tabu[i]) return i;
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (_delta[i] == 0 && !_tabu[i]) return i;
+        }
+        return -1;
+      }
+
+      // Return a node index for a feasible swap move or -1 if no one exists
+      int nextFeasibleSwapNode() const {
+        int start_node = _rnd[_n];
+        for (int i = start_node; i != _n; i++) {
+          if (!_clique[i] && _delta[i] == 1 && !_tabu[i]) return i;
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (!_clique[i] && _delta[i] == 1 && !_tabu[i]) return i;
+        }
+        return -1;
+      }
+
+      // Return a node index for an add move or -1 if no one exists
+      int nextAddNode() const {
+        int start_node = _rnd[_n];
+        for (int i = start_node; i != _n; i++) {
+          if (_delta[i] == 0) return i;
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (_delta[i] == 0) return i;
+        }
+        return -1;
+      }
+
+      // Update internal data structures between stages (if necessary)
+      void update() {}
+
+    }; //class RandomSelectionRule
+
+
+    // Implementation of the DEGREE_BASED node selection rule.
+    class DegreeBasedSelectionRule
+    {
+    private:
+
+      // References to the algorithm instance
+      const BoolVector &_clique;
+      const IntVector  &_delta;
+      const BoolVector &_tabu;
+      Random &_rnd;
+
+      // Pivot rule data
+      int _n;
+      IntVector _deg;
+
+    public:
+
+      // Constructor
+      DegreeBasedSelectionRule(GrossoLocatelliPullanMc &mc) :
+        _clique(mc._clique), _delta(mc._delta), _tabu(mc._tabu),
+        _rnd(mc._rnd), _n(mc._n), _deg(_n)
+      {
+        for (int i = 0; i != _n; i++) {
+          int d = 0;
+          BoolVector &row = mc._gr[i];
+          for (int j = 0; j != _n; j++) {
+            if (row[j]) d++;
+          }
+          _deg[i] = d;
+        }
+      }
+
+      // Return a node index for a feasible add move or -1 if no one exists
+      int nextFeasibleAddNode() const {
+        int start_node = _rnd[_n];
+        int node = -1, max_deg = -1;
+        for (int i = start_node; i != _n; i++) {
+          if (_delta[i] == 0 && !_tabu[i] && _deg[i] > max_deg) {
+            node = i;
+            max_deg = _deg[i];
+          }
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (_delta[i] == 0 && !_tabu[i] && _deg[i] > max_deg) {
+            node = i;
+            max_deg = _deg[i];
+          }
+        }
+        return node;
+      }
+
+      // Return a node index for a feasible swap move or -1 if no one exists
+      int nextFeasibleSwapNode() const {
+        int start_node = _rnd[_n];
+        int node = -1, max_deg = -1;
+        for (int i = start_node; i != _n; i++) {
+          if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+              _deg[i] > max_deg) {
+            node = i;
+            max_deg = _deg[i];
+          }
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+              _deg[i] > max_deg) {
+            node = i;
+            max_deg = _deg[i];
+          }
+        }
+        return node;
+      }
+
+      // Return a node index for an add move or -1 if no one exists
+      int nextAddNode() const {
+        int start_node = _rnd[_n];
+        int node = -1, max_deg = -1;
+        for (int i = start_node; i != _n; i++) {
+          if (_delta[i] == 0 && _deg[i] > max_deg) {
+            node = i;
+            max_deg = _deg[i];
+          }
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (_delta[i] == 0 && _deg[i] > max_deg) {
+            node = i;
+            max_deg = _deg[i];
+          }
+        }
+        return node;
+      }
+
+      // Update internal data structures between stages (if necessary)
+      void update() {}
+
+    }; //class DegreeBasedSelectionRule
+
+
+    // Implementation of the PENALTY_BASED node selection rule.
+    class PenaltyBasedSelectionRule
+    {
+    private:
+
+      // References to the algorithm instance
+      const BoolVector &_clique;
+      const IntVector  &_delta;
+      const BoolVector &_tabu;
+      Random &_rnd;
+
+      // Pivot rule data
+      int _n;
+      IntVector _penalty;
+
+    public:
+
+      // Constructor
+      PenaltyBasedSelectionRule(GrossoLocatelliPullanMc &mc) :
+        _clique(mc._clique), _delta(mc._delta), _tabu(mc._tabu),
+        _rnd(mc._rnd), _n(mc._n), _penalty(_n, 0)
+      {}
+
+      // Return a node index for a feasible add move or -1 if no one exists
+      int nextFeasibleAddNode() const {
+        int start_node = _rnd[_n];
+        int node = -1, min_p = std::numeric_limits<int>::max();
+        for (int i = start_node; i != _n; i++) {
+          if (_delta[i] == 0 && !_tabu[i] && _penalty[i] < min_p) {
+            node = i;
+            min_p = _penalty[i];
+          }
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (_delta[i] == 0 && !_tabu[i] && _penalty[i] < min_p) {
+            node = i;
+            min_p = _penalty[i];
+          }
+        }
+        return node;
+      }
+
+      // Return a node index for a feasible swap move or -1 if no one exists
+      int nextFeasibleSwapNode() const {
+        int start_node = _rnd[_n];
+        int node = -1, min_p = std::numeric_limits<int>::max();
+        for (int i = start_node; i != _n; i++) {
+          if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+              _penalty[i] < min_p) {
+            node = i;
+            min_p = _penalty[i];
+          }
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (!_clique[i] && _delta[i] == 1 && !_tabu[i] &&
+              _penalty[i] < min_p) {
+            node = i;
+            min_p = _penalty[i];
+          }
+        }
+        return node;
+      }
+
+      // Return a node index for an add move or -1 if no one exists
+      int nextAddNode() const {
+        int start_node = _rnd[_n];
+        int node = -1, min_p = std::numeric_limits<int>::max();
+        for (int i = start_node; i != _n; i++) {
+          if (_delta[i] == 0 && _penalty[i] < min_p) {
+            node = i;
+            min_p = _penalty[i];
+          }
+        }
+        for (int i = 0; i != start_node; i++) {
+          if (_delta[i] == 0 && _penalty[i] < min_p) {
+            node = i;
+            min_p = _penalty[i];
+          }
+        }
+        return node;
+      }
+
+      // Update internal data structures between stages (if necessary)
+      void update() {}
+
+    }; //class PenaltyBasedSelectionRule
+
+  public:
+
+    /// \brief Constructor.
+    ///
+    /// Constructor.
+    /// The global \ref rnd "random number generator instance" is used
+    /// during the algorithm.
+    ///
+    /// \param graph The undirected graph the algorithm runs on.
+    GrossoLocatelliPullanMc(const GR& graph) :
+      _graph(graph), _id(_graph), _rnd(rnd)
+    {
+      initOptions();
+    }
+
+    /// \brief Constructor with random seed.
+    ///
+    /// Constructor with random seed.
+    ///
+    /// \param graph The undirected graph the algorithm runs on.
+    /// \param seed Seed value for the internal random number generator
+    /// that is used during the algorithm.
+    GrossoLocatelliPullanMc(const GR& graph, int seed) :
+      _graph(graph), _id(_graph), _rnd(seed)
+    {
+      initOptions();
+    }
+
+    /// \brief Constructor with random number generator.
+    ///
+    /// Constructor with random number generator.
+    ///
+    /// \param graph The undirected graph the algorithm runs on.
+    /// \param random A random number generator that is used during the
+    /// algorithm.
+    GrossoLocatelliPullanMc(const GR& graph, const Random& random) :
+      _graph(graph), _id(_graph), _rnd(random)
+    {
+      initOptions();
+    }
+
+    /// \name Execution Control
+    /// The \ref run() function can be used to execute the algorithm.\n
+    /// The functions \ref iterationLimit(int), \ref stepLimit(int), and 
+    /// \ref sizeLimit(int) can be used to specify various limits for the
+    /// search process.
+    
+    /// @{
+    
+    /// \brief Sets the maximum number of iterations.
+    ///
+    /// This function sets the maximum number of iterations.
+    /// Each iteration of the algorithm finds a maximal clique (but not
+    /// necessarily the largest one) by performing several search steps
+    /// (node selections).
+    ///
+    /// This limit controls the running time and the success of the
+    /// algorithm. For larger values, the algorithm runs slower, but it more
+    /// likely finds larger cliques. For smaller values, the algorithm is
+    /// faster but probably gives worse results.
+    /// 
+    /// The default value is \c 1000.
 +    /// \c -1 means that the number of iterations is not limited.
+    ///
+    /// \warning You should specify a reasonable limit for the number of
+    /// iterations and/or the number of search steps.
+    ///
+    /// \return <tt>(*this)</tt>
+    ///
+    /// \sa stepLimit(int)
+    /// \sa sizeLimit(int)
+    GrossoLocatelliPullanMc& iterationLimit(int limit) {
+      _iteration_limit = limit;
+      return *this;
+    }
+    
+    /// \brief Sets the maximum number of search steps.
+    ///
+    /// This function sets the maximum number of elementary search steps.
+    /// Each iteration of the algorithm finds a maximal clique (but not
+    /// necessarily the largest one) by performing several search steps
+    /// (node selections).
+    ///
+    /// This limit controls the running time and the success of the
+    /// algorithm. For larger values, the algorithm runs slower, but it more
+    /// likely finds larger cliques. For smaller values, the algorithm is
+    /// faster but probably gives worse results.
+    /// 
 +    /// The default value is \c -1, which means that the number of steps
+    /// is not limited explicitly. However, the number of iterations is
+    /// limited and each iteration performs a finite number of search steps.
+    ///
+    /// \warning You should specify a reasonable limit for the number of
+    /// iterations and/or the number of search steps.
+    ///
+    /// \return <tt>(*this)</tt>
+    ///
+    /// \sa iterationLimit(int)
+    /// \sa sizeLimit(int)
+    GrossoLocatelliPullanMc& stepLimit(int limit) {
+      _step_limit = limit;
+      return *this;
+    }
+    
+    /// \brief Sets the desired clique size.
+    ///
+    /// This function sets the desired clique size that serves as a search
+    /// limit. If a clique of this size (or a larger one) is found, then the
+    /// algorithm terminates.
+    /// 
+    /// This function is especially useful if you know an exact upper bound
+    /// for the size of the cliques in the graph or if any clique above 
+    /// a certain size limit is sufficient for your application.
+    /// 
+    /// The default value is \c -1, which means that the size limit is set to
+    /// the number of nodes in the graph.
+    ///
+    /// \return <tt>(*this)</tt>
+    ///
+    /// \sa iterationLimit(int)
+    /// \sa stepLimit(int)
+    GrossoLocatelliPullanMc& sizeLimit(int limit) {
+      _size_limit = limit;
+      return *this;
+    }
+    
+    /// \brief The maximum number of iterations.
+    ///
+    /// This function gives back the maximum number of iterations.
+    /// \c -1 means that no limit is specified.
+    ///
+    /// \sa iterationLimit(int)
+    int iterationLimit() const {
+      return _iteration_limit;
+    }
+    
+    /// \brief The maximum number of search steps.
+    ///
+    /// This function gives back the maximum number of search steps.
+    /// \c -1 means that no limit is specified.
+    ///
+    /// \sa stepLimit(int)
+    int stepLimit() const {
+      return _step_limit;
+    }
+    
+    /// \brief The desired clique size.
+    ///
+    /// This function gives back the desired clique size that serves as a
+    /// search limit. \c -1 means that this limit is set to the number of
+    /// nodes in the graph.
+    ///
+    /// \sa sizeLimit(int)
+    int sizeLimit() const {
+      return _size_limit;
+    }
+
+    /// \brief Runs the algorithm.
+    ///
+    /// This function runs the algorithm. If one of the specified limits
+    /// is reached, the search process terminates.
+    ///
+    /// \param rule The node selection rule. For more information, see
+    /// \ref SelectionRule.
+    ///
+    /// \return The termination cause of the search. For more information,
+    /// see \ref TerminationCause.
+    TerminationCause run(SelectionRule rule = PENALTY_BASED)
+    {
+      init();
+      switch (rule) {
+        case RANDOM:
+          return start<RandomSelectionRule>();
+        case DEGREE_BASED:
+          return start<DegreeBasedSelectionRule>();
+        default:
+          return start<PenaltyBasedSelectionRule>();
+      }
+    }
+
+    /// @}
+
+    /// \name Query Functions
+    /// The results of the algorithm can be obtained using these functions.\n
+    /// The run() function must be called before using them. 
+
+    /// @{
+
+    /// \brief The size of the found clique
+    ///
+    /// This function returns the size of the found clique.
+    ///
+    /// \pre run() must be called before using this function.
+    int cliqueSize() const {
+      return _best_size;
+    }
+
+    /// \brief Gives back the found clique in a \c bool node map
+    ///
+    /// This function gives back the characteristic vector of the found
+    /// clique in the given node map.
+    /// It must be a \ref concepts::WriteMap "writable" node map with
+    /// \c bool (or convertible) value type.
+    ///
+    /// \pre run() must be called before using this function.
+    template <typename CliqueMap>
+    void cliqueMap(CliqueMap &map) const {
+      for (NodeIt n(_graph); n != INVALID; ++n) {
+        map[n] = static_cast<bool>(_best_clique[_id[n]]);
+      }
+    }
+
+    /// \brief Iterator to list the nodes of the found clique
+    ///
+    /// This iterator class lists the nodes of the found clique.
+    /// Before using it, you must allocate a GrossoLocatelliPullanMc instance
+    /// and call its \ref GrossoLocatelliPullanMc::run() "run()" method.
+    ///
+    /// The following example prints out the IDs of the nodes in the found
+    /// clique.
+    /// \code
+    ///   GrossoLocatelliPullanMc<Graph> mc(g);
+    ///   mc.run();
+    ///   for (GrossoLocatelliPullanMc<Graph>::CliqueNodeIt n(mc);
+    ///        n != INVALID; ++n)
+    ///   {
+    ///     std::cout << g.id(n) << std::endl;
+    ///   }
+    /// \endcode
+    class CliqueNodeIt
+    {
+    private:
+      NodeIt _it;
+      BoolNodeMap _map;
+
+    public:
+
+      /// Constructor
+
+      /// Constructor.
+      /// \param mc The algorithm instance.
+      CliqueNodeIt(const GrossoLocatelliPullanMc &mc)
+       : _map(mc._graph)
+      {
+        mc.cliqueMap(_map);
+        for (_it = NodeIt(mc._graph); _it != INVALID && !_map[_it]; ++_it) ;
+      }
+
+      /// Conversion to \c Node
+      operator Node() const { return _it; }
+
+      bool operator==(Invalid) const { return _it == INVALID; }
+      bool operator!=(Invalid) const { return _it != INVALID; }
+
+      /// Next node
+      CliqueNodeIt &operator++() {
+        for (++_it; _it != INVALID && !_map[_it]; ++_it) ;
+        return *this;
+      }
+
+      /// Postfix incrementation
+
+      /// Postfix incrementation.
+      ///
+      /// \warning This incrementation returns a \c Node, not a
+      /// \c CliqueNodeIt as one may expect.
+      typename GR::Node operator++(int) {
+        Node n=*this;
+        ++(*this);
+        return n;
+      }
+
+    };
+
+    /// @}
+
+  private:
+  
+    // Initialize search options and limits
+    void initOptions() {
+      // Search options
+      _delta_based_restart = true;
+      _restart_delta_limit = 4;
+     
+      // Search limits
+      _iteration_limit = 1000;
+      _step_limit = -1;             // this is disabled by default
+      _size_limit = -1;             // this is disabled by default
+    }
+
+    // Adds a node to the current clique
+    void addCliqueNode(int u) {
+      if (_clique[u]) return;
+      _clique[u] = true;
+      _size++;
+      BoolVector &row = _gr[u];
+      for (int i = 0; i != _n; i++) {
+        if (!row[i]) _delta[i]++;
+      }
+    }
+
+    // Removes a node from the current clique
+    void delCliqueNode(int u) {
+      if (!_clique[u]) return;
+      _clique[u] = false;
+      _size--;
+      BoolVector &row = _gr[u];
+      for (int i = 0; i != _n; i++) {
+        if (!row[i]) _delta[i]--;
+      }
+    }
+
+    // Initialize data structures
+    void init() {
+      _n = countNodes(_graph);
+      int ui = 0;
+      for (NodeIt u(_graph); u != INVALID; ++u) {
+        _id[u] = ui++;
+      }
+      _gr.clear();
+      _gr.resize(_n, BoolVector(_n, false));
+      ui = 0;
+      for (NodeIt u(_graph); u != INVALID; ++u) {
+        for (IncEdgeIt e(_graph, u); e != INVALID; ++e) {
+          int vi = _id[_graph.runningNode(e)];
+          _gr[ui][vi] = true;
+          _gr[vi][ui] = true;
+        }
+        ++ui;
+      }
+
+      _clique.clear();
+      _clique.resize(_n, false);
+      _size = 0;
+      _best_clique.clear();
+      _best_clique.resize(_n, false);
+      _best_size = 0;
+      _delta.clear();
+      _delta.resize(_n, 0);
+      _tabu.clear();
+      _tabu.resize(_n, false);
+    }
+
+    // Executes the algorithm
+    template <typename SelectionRuleImpl>
+    TerminationCause start() {
+      if (_n == 0) return SIZE_LIMIT;
+      if (_n == 1) {
+        _best_clique[0] = true;
+        _best_size = 1;
+        return SIZE_LIMIT;
+      }
+
+      // Iterated local search algorithm
+      const int max_size = _size_limit >= 0 ? _size_limit : _n;
+      const int max_restart = _iteration_limit >= 0 ?
+        _iteration_limit : std::numeric_limits<int>::max();
+      const int max_select = _step_limit >= 0 ?
+        _step_limit : std::numeric_limits<int>::max();
+
+      SelectionRuleImpl sel_method(*this);
+      int select = 0, restart = 0;
+      IntVector restart_nodes;
+      while (select < max_select && restart < max_restart) {
+
+        // Perturbation/restart
+        restart++;
+        if (_delta_based_restart) {
+          restart_nodes.clear();
+          for (int i = 0; i != _n; i++) {
+            if (_delta[i] >= _restart_delta_limit)
+              restart_nodes.push_back(i);
+          }
+        }
+        int rs_node = -1;
+        if (restart_nodes.size() > 0) {
+          rs_node = restart_nodes[_rnd[restart_nodes.size()]];
+        } else {
+          rs_node = _rnd[_n];
+        }
+        BoolVector &row = _gr[rs_node];
+        for (int i = 0; i != _n; i++) {
+          if (_clique[i] && !row[i]) delCliqueNode(i);
+        }
+        addCliqueNode(rs_node);
+
+        // Local search
+        _tabu.clear();
+        _tabu.resize(_n, false);
+        bool tabu_empty = true;
+        int max_swap = _size;
+        while (select < max_select) {
+          select++;
+          int u;
+          if ((u = sel_method.nextFeasibleAddNode()) != -1) {
+            // Feasible add move
+            addCliqueNode(u);
+            if (tabu_empty) max_swap = _size;
+          }
+          else if ((u = sel_method.nextFeasibleSwapNode()) != -1) {
+            // Feasible swap move
+            int v = -1;
+            BoolVector &row = _gr[u];
+            for (int i = 0; i != _n; i++) {
+              if (_clique[i] && !row[i]) {
+                v = i;
+                break;
+              }
+            }
+            addCliqueNode(u);
+            delCliqueNode(v);
+            _tabu[v] = true;
+            tabu_empty = false;
+            if (--max_swap <= 0) break;
+          }
+          else if ((u = sel_method.nextAddNode()) != -1) {
+            // Non-feasible add move
+            addCliqueNode(u);
+          }
+          else break;
+        }
+        if (_size > _best_size) {
+          _best_clique = _clique;
+          _best_size = _size;
+          if (_best_size >= max_size) return SIZE_LIMIT;
+        }
+        sel_method.update();
+      }
+
+      return (restart >= max_restart ? ITERATION_LIMIT : STEP_LIMIT);
+    }
+
+  }; //class GrossoLocatelliPullanMc
+
+  ///@}
+
+} //namespace lemon
+
+#endif //LEMON_GROSSO_LOCATELLI_PULLAN_MC_H
Index: lemon/hao_orlin.h
===================================================================
--- lemon/hao_orlin.h	(revision 877)
+++ lemon/hao_orlin.h	(revision 915)
@@ -54,6 +54,6 @@
   /// preflow push-relabel algorithm. Our implementation calculates
   /// the minimum cut in \f$ O(n^2\sqrt{m}) \f$ time (we use the
-  /// highest-label rule), or in \f$O(nm)\f$ for unit capacities. The
-  /// purpose of such algorithm is e.g. testing network reliability.
+  /// highest-label rule), or in \f$O(nm)\f$ for unit capacities. A notable
+  /// use of this algorithm is testing network reliability.
   ///
   /// For an undirected graph you can run just the first phase of the
@@ -913,4 +913,6 @@
     /// source-side (i.e. a set \f$ X\subsetneq V \f$ with
     /// \f$ source \in X \f$ and minimal outgoing capacity).
+    /// It updates the stored cut if (and only if) the newly found one
+    /// is better.
     ///
     /// \pre \ref init() must be called before using this function.
@@ -925,4 +927,6 @@
     /// sink-side (i.e. a set \f$ X\subsetneq V \f$ with
     /// \f$ source \notin X \f$ and minimal outgoing capacity).
+    /// It updates the stored cut if (and only if) the newly found one
+    /// is better.
     ///
     /// \pre \ref init() must be called before using this function.
@@ -934,6 +938,6 @@
     /// \brief Run the algorithm.
     ///
-    /// This function runs the algorithm. It finds nodes \c source and
-    /// \c target arbitrarily and then calls \ref init(), \ref calculateOut()
+    /// This function runs the algorithm. It chooses a source node,
+    /// then calls \ref init(), \ref calculateOut()
     /// and \ref calculateIn().
     void run() {
@@ -945,7 +949,7 @@
     /// \brief Run the algorithm.
     ///
-    /// This function runs the algorithm. It uses the given \c source node,
-    /// finds a proper \c target node and then calls the \ref init(),
-    /// \ref calculateOut() and \ref calculateIn().
+    /// This function runs the algorithm. It calls \ref init(),
+    /// \ref calculateOut() and \ref calculateIn() with the given
+    /// source node.
     void run(const Node& s) {
       init(s);
@@ -966,5 +970,7 @@
     /// \brief Return the value of the minimum cut.
     ///
-    /// This function returns the value of the minimum cut.
+    /// This function returns the value of the best cut found by the
+    /// previously called \ref run(), \ref calculateOut() or \ref
+    /// calculateIn().
     ///
     /// \pre \ref run(), \ref calculateOut() or \ref calculateIn()
@@ -977,7 +983,11 @@
     /// \brief Return a minimum cut.
     ///
-    /// This function sets \c cutMap to the characteristic vector of a
-    /// minimum value cut: it will give a non-empty set \f$ X\subsetneq V \f$
-    /// with minimal outgoing capacity (i.e. \c cutMap will be \c true exactly
+    /// This function gives the best cut found by the
+    /// previously called \ref run(), \ref calculateOut() or \ref
+    /// calculateIn().
+    ///
+    /// It sets \c cutMap to the characteristic vector of the found
+    /// minimum value cut - a non-empty set \f$ X\subsetneq V \f$
+    /// of minimum outgoing capacity (i.e. \c cutMap will be \c true exactly
     /// for the nodes of \f$ X \f$).
     ///
Index: lemon/hartmann_orlin_mmc.h
===================================================================
--- lemon/hartmann_orlin_mmc.h	(revision 879)
+++ lemon/hartmann_orlin_mmc.h	(revision 1002)
@@ -99,5 +99,5 @@
   /// This class implements the Hartmann-Orlin algorithm for finding
   /// a directed cycle of minimum mean cost in a digraph
-  /// \ref amo93networkflows, \ref dasdan98minmeancycle.
+  /// \ref hartmann93finding, \ref dasdan98minmeancycle.
   /// It is an improved version of \ref KarpMmc "Karp"'s original algorithm,
   /// it applies an efficient early termination scheme.
Index: lemon/howard_mmc.h
===================================================================
--- lemon/howard_mmc.h	(revision 877)
+++ lemon/howard_mmc.h	(revision 1002)
@@ -99,5 +99,5 @@
   /// This class implements Howard's policy iteration algorithm for finding
   /// a directed cycle of minimum mean cost in a digraph
-  /// \ref amo93networkflows, \ref dasdan98minmeancycle.
+  /// \ref dasdan98minmeancycle, \ref dasdan04experimental.
   /// This class provides the most efficient algorithm for the
   /// minimum mean cycle problem, though the best known theoretical
Index: lemon/karp_mmc.h
===================================================================
--- lemon/karp_mmc.h	(revision 877)
+++ lemon/karp_mmc.h	(revision 1002)
@@ -99,5 +99,5 @@
   /// This class implements Karp's algorithm for finding a directed
   /// cycle of minimum mean cost in a digraph
-  /// \ref amo93networkflows, \ref dasdan98minmeancycle.
+  /// \ref karp78characterization, \ref dasdan98minmeancycle.
   /// It runs in time O(ne) and uses space O(n<sup>2</sup>+e).
   ///
Index: lemon/kruskal.h
===================================================================
--- lemon/kruskal.h	(revision 584)
+++ lemon/kruskal.h	(revision 921)
@@ -31,7 +31,4 @@
 ///\file
 ///\brief Kruskal's algorithm to compute a minimum cost spanning tree
-///
-///Kruskal's algorithm to compute a minimum cost spanning tree.
-///
 
 namespace lemon {
Index: lemon/lemon.pc.cmake
===================================================================
--- lemon/lemon.pc.cmake	(revision 908)
+++ 	(revision )
@@ -1,10 +1,0 @@
-prefix=@CMAKE_INSTALL_PREFIX@
-exec_prefix=@CMAKE_INSTALL_PREFIX@/bin
-libdir=@CMAKE_INSTALL_PREFIX@/lib
-includedir=@CMAKE_INSTALL_PREFIX@/include
-
-Name: @PROJECT_NAME@
-Description: Library for Efficient Modeling and Optimization in Networks
-Version: @PROJECT_VERSION@
-Libs: -L${libdir} -lemon @GLPK_LIBS@ @CPLEX_LIBS@ @SOPLEX_LIBS@ @CLP_LIBS@ @CBC_LIBS@
-Cflags: -I${includedir}
Index: lemon/lemon.pc.in
===================================================================
--- lemon/lemon.pc.in	(revision 658)
+++ lemon/lemon.pc.in	(revision 981)
@@ -1,10 +1,10 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=@CMAKE_INSTALL_PREFIX@/bin
+libdir=@CMAKE_INSTALL_PREFIX@/lib
+includedir=@CMAKE_INSTALL_PREFIX@/include
 
-Name: @PACKAGE_NAME@
+Name: @PROJECT_NAME@
 Description: Library for Efficient Modeling and Optimization in Networks
-Version: @PACKAGE_VERSION@
+Version: @PROJECT_VERSION@
 Libs: -L${libdir} -lemon @GLPK_LIBS@ @CPLEX_LIBS@ @SOPLEX_LIBS@ @CLP_LIBS@ @CBC_LIBS@
 Cflags: -I${includedir}
Index: lemon/max_cardinality_search.h
===================================================================
--- lemon/max_cardinality_search.h	(revision 977)
+++ lemon/max_cardinality_search.h	(revision 977)
@@ -0,0 +1,793 @@
+/* -*- C++ -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_MAX_CARDINALITY_SEARCH_H
+#define LEMON_MAX_CARDINALITY_SEARCH_H
+
+
+/// \ingroup search
+/// \file 
+/// \brief Maximum cardinality search in digraphs.
+
+#include <lemon/bin_heap.h>
+#include <lemon/bucket_heap.h>
+
+#include <lemon/error.h>
+#include <lemon/maps.h>
+
+#include <functional>
+
+namespace lemon {
+
+  /// \brief Default traits class of MaxCardinalitySearch class.
+  ///
+  /// Default traits class of MaxCardinalitySearch class.
+  /// \param GR Digraph type.
+  /// \param CAP Type of capacity map.
+  template <typename GR, typename CAP>
+  struct MaxCardinalitySearchDefaultTraits {
+    /// The digraph type the algorithm runs on. 
+    typedef GR Digraph;
+
+    template <typename CM>
+    struct CapMapSelector {
+
+      typedef CM CapacityMap;
+
+      static CapacityMap *createCapacityMap(const Digraph& g) {
+	return new CapacityMap(g);
+      }
+    };
+
+    template <typename CM>
+    struct CapMapSelector<ConstMap<CM, Const<int, 1> > > {
+
+      typedef ConstMap<CM, Const<int, 1> > CapacityMap;
+
+      static CapacityMap *createCapacityMap(const Digraph&) {
+	return new CapacityMap;
+      }
+    };
+
+    /// \brief The type of the map that stores the arc capacities.
+    ///
+    /// The type of the map that stores the arc capacities.
+    /// It must meet the \ref concepts::ReadMap "ReadMap" concept.
+    typedef typename CapMapSelector<CAP>::CapacityMap CapacityMap;
+
+    /// \brief The type of the capacity of the arcs.
+    typedef typename CapacityMap::Value Value;
+
+    /// \brief Instantiates a CapacityMap.
+    ///
+    /// This function instantiates a \ref CapacityMap.
+    /// \param digraph is the digraph, to which we would like to define
+    /// the CapacityMap.
+    static CapacityMap *createCapacityMap(const Digraph& digraph) {
+      return CapMapSelector<CapacityMap>::createCapacityMap(digraph);
+    }
+
+    /// \brief The cross reference type used by heap.
+    ///
+    /// The cross reference type used by heap.
+    /// Usually it is \c Digraph::NodeMap<int>.
+    typedef typename Digraph::template NodeMap<int> HeapCrossRef;
+
+    /// \brief Instantiates a HeapCrossRef.
+    ///
+    /// This function instantiates a \ref HeapCrossRef. 
+    /// \param digraph is the digraph, to which we would like to define the 
+    /// HeapCrossRef.
+    static HeapCrossRef *createHeapCrossRef(const Digraph &digraph) {
+      return new HeapCrossRef(digraph);
+    }
+    
+    template <typename CapacityMap>
+    struct HeapSelector {
+      template <typename Value, typename Ref>
+      struct Selector { 
+        typedef BinHeap<Value, Ref, std::greater<Value> > Heap;
+      };
+    };
+
+    template <typename CapacityKey>
+    struct HeapSelector<ConstMap<CapacityKey, Const<int, 1> > > {
+      template <typename Value, typename Ref>
+      struct Selector {
+        typedef BucketHeap<Ref, false > Heap;
+      };
+    };
+
+    /// \brief The heap type used by MaxCardinalitySearch algorithm.
+    ///
+    /// The heap type used by MaxCardinalitySearch algorithm. It should
+    /// maximize the priorities. The default heap type is
+    /// the \ref BinHeap, but it is specialized when the
+    /// CapacityMap is ConstMap<Digraph::Arc, Const<int, 1> >
+    /// to BucketHeap.
+    ///
+    /// \sa MaxCardinalitySearch
+    typedef typename HeapSelector<CapacityMap>
+    ::template Selector<Value, HeapCrossRef>
+    ::Heap Heap;
+
+    /// \brief Instantiates a Heap.
+    ///
+    /// This function instantiates a \ref Heap. 
+    /// \param crossref The cross reference of the heap.
+    static Heap *createHeap(HeapCrossRef& crossref) {
+      return new Heap(crossref);
+    }
+
+    /// \brief The type of the map that stores whether a node is processed.
+    ///
+    /// The type of the map that stores whether a node is processed.
+    /// It must meet the \ref concepts::WriteMap "WriteMap" concept.
+    /// By default it is a NullMap.
+    typedef NullMap<typename Digraph::Node, bool> ProcessedMap;
+
+    /// \brief Instantiates a ProcessedMap.
+    ///
+    /// This function instantiates a \ref ProcessedMap. 
+    /// \param digraph is the digraph, to which
+    /// we would like to define the \ref ProcessedMap
+#ifdef DOXYGEN
+    static ProcessedMap *createProcessedMap(const Digraph &digraph)
+#else
+    static ProcessedMap *createProcessedMap(const Digraph &)
+#endif
+    {
+      return new ProcessedMap();
+    }
+
+    /// \brief The type of the map that stores the cardinalities of the nodes.
+    /// 
+    /// The type of the map that stores the cardinalities of the nodes.
+    /// It must meet the \ref concepts::WriteMap "WriteMap" concept.
+    typedef typename Digraph::template NodeMap<Value> CardinalityMap;
+
+    /// \brief Instantiates a CardinalityMap.
+    ///
+    /// This function instantiates a \ref CardinalityMap. 
+    /// \param digraph is the digraph, to which we would like to define the \ref 
+    /// CardinalityMap
+    static CardinalityMap *createCardinalityMap(const Digraph &digraph) {
+      return new CardinalityMap(digraph);
+    }
+
+
+  };
+  
+  /// \ingroup search
+  ///
+  /// \brief Maximum Cardinality Search algorithm class.
+  ///
+  /// This class provides an efficient implementation of Maximum Cardinality 
+  /// Search algorithm. The maximum cardinality search first chooses any 
+  /// node of the digraph. Then every time it chooses one unprocessed node
+  /// with maximum cardinality, i.e. the sum of capacities on its out arcs to
+  /// the nodes which were previously processed.
+  /// If there is a cut in the digraph the algorithm should choose
+  /// again any unprocessed node of the digraph.
+
+  /// The arc capacities are passed to the algorithm using a
+  /// \ref concepts::ReadMap "ReadMap", so it is easy to change it to any 
+  /// kind of capacity.
+  ///
+  /// The type of the capacity is determined by the \ref 
+  /// concepts::ReadMap::Value "Value" of the capacity map.
+  ///
+  /// It is also possible to change the underlying priority heap.
+  ///
+  ///
+  /// \param GR The digraph type the algorithm runs on. The value of
+  /// Digraph is not used directly by the search algorithm, it 
+  /// is only passed to \ref MaxCardinalitySearchDefaultTraits.
+  /// \param CAP This read-only ArcMap determines the capacities of 
+  /// the arcs. It is read once for each arc, so the map may involve a
+  /// relatively time consuming computation of the arc capacity if
+  /// necessary. The default map type is \ref
+  /// ConstMap "ConstMap<concepts::Digraph::Arc, Const<int,1> >". The value
+  /// of CapacityMap is not used directly by search algorithm, it is only 
+  /// passed to \ref MaxCardinalitySearchDefaultTraits.  
+  /// \param TR Traits class to set various data types used by the 
+  /// algorithm.  The default traits class is 
+  /// \ref MaxCardinalitySearchDefaultTraits 
+  /// "MaxCardinalitySearchDefaultTraits<GR, CAP>".  
+  /// See \ref MaxCardinalitySearchDefaultTraits 
+  /// for the documentation of a MaxCardinalitySearch traits class.
+
+#ifdef DOXYGEN
+  template <typename GR, typename CAP, typename TR>
+#else
+  template <typename GR, typename CAP = 
+	    ConstMap<typename GR::Arc, Const<int,1> >,
+	    typename TR = 
+            MaxCardinalitySearchDefaultTraits<GR, CAP> >
+#endif
+  class MaxCardinalitySearch {
+  public:
+
+    typedef TR Traits;
+    ///The type of the underlying digraph.
+    typedef typename Traits::Digraph Digraph;
+    
+    ///The type of the capacity of the arcs.
+    typedef typename Traits::CapacityMap::Value Value;
+    ///The type of the map that stores the arc capacities.
+    typedef typename Traits::CapacityMap CapacityMap;
+    ///The type of the map indicating if a node is processed.
+    typedef typename Traits::ProcessedMap ProcessedMap;
+    ///The type of the map that stores the cardinalities of the nodes.
+    typedef typename Traits::CardinalityMap CardinalityMap;
+    ///The cross reference type used for the current heap.
+    typedef typename Traits::HeapCrossRef HeapCrossRef;
+    ///The heap type used by the algorithm. It maximizes the priorities.
+    typedef typename Traits::Heap Heap;
+  private:
+    // Pointer to the underlying digraph.
+    const Digraph *_graph;
+    // Pointer to the capacity map
+    const CapacityMap *_capacity;
+    // Indicates if \ref _capacity is locally allocated (\c true) or not.
+    bool local_capacity;
+    // Pointer to the map of cardinality.
+    CardinalityMap *_cardinality;
+    // Indicates if \ref _cardinality is locally allocated (\c true) or not.
+    bool local_cardinality;
+    // Pointer to the map of processed status of the nodes.
+    ProcessedMap *_processed;
+    // Indicates if \ref _processed is locally allocated (\c true) or not.
+    bool local_processed;
+    // Pointer to the heap cross references.
+    HeapCrossRef *_heap_cross_ref;
+    // Indicates if \ref _heap_cross_ref is locally allocated (\c true) or not.
+    bool local_heap_cross_ref;
+    // Pointer to the heap.
+    Heap *_heap;
+    // Indicates if \ref _heap is locally allocated (\c true) or not.
+    bool local_heap;
+
+  public :
+
+    typedef MaxCardinalitySearch Create;
+ 
+    ///\name Named template parameters
+
+    ///@{
+
+    template <class T>
+    struct DefCapacityMapTraits : public Traits {
+      typedef T CapacityMap;
+      static CapacityMap *createCapacityMap(const Digraph &) {
+       	LEMON_ASSERT(false,"Uninitialized parameter.");
+	return 0;
+      }
+    };
+    /// \brief \ref named-templ-param "Named parameter" for setting 
+    /// CapacityMap type
+    ///
+    /// \ref named-templ-param "Named parameter" for setting CapacityMap type
+    /// for the algorithm.
+    template <class T>
+    struct SetCapacityMap 
+      : public MaxCardinalitySearch<Digraph, CapacityMap, 
+                                    DefCapacityMapTraits<T> > { 
+      typedef MaxCardinalitySearch<Digraph, CapacityMap, 
+                                   DefCapacityMapTraits<T> > Create;
+    };
+
+    template <class T>
+    struct DefCardinalityMapTraits : public Traits {
+      typedef T CardinalityMap;
+      static CardinalityMap *createCardinalityMap(const Digraph &) 
+      {
+	LEMON_ASSERT(false,"Uninitialized parameter.");
+	return 0;
+      }
+    };
+    /// \brief \ref named-templ-param "Named parameter" for setting 
+    /// CardinalityMap type
+    ///
+    /// \ref named-templ-param "Named parameter" for setting CardinalityMap 
+    /// type for the algorithm.
+    template <class T>
+    struct SetCardinalityMap 
+      : public MaxCardinalitySearch<Digraph, CapacityMap, 
+                                    DefCardinalityMapTraits<T> > { 
+      typedef MaxCardinalitySearch<Digraph, CapacityMap, 
+                                   DefCardinalityMapTraits<T> > Create;
+    };
+    
+    template <class T>
+    struct DefProcessedMapTraits : public Traits {
+      typedef T ProcessedMap;
+      static ProcessedMap *createProcessedMap(const Digraph &) {
+       	LEMON_ASSERT(false,"Uninitialized parameter.");
+	return 0;
+      }
+    };
+    /// \brief \ref named-templ-param "Named parameter" for setting 
+    /// ProcessedMap type
+    ///
+    /// \ref named-templ-param "Named parameter" for setting ProcessedMap type
+    /// for the algorithm.
+    template <class T>
+    struct SetProcessedMap 
+      : public MaxCardinalitySearch<Digraph, CapacityMap, 
+                                    DefProcessedMapTraits<T> > { 
+      typedef MaxCardinalitySearch<Digraph, CapacityMap, 
+                                   DefProcessedMapTraits<T> > Create;
+    };
+    
+    template <class H, class CR>
+    struct DefHeapTraits : public Traits {
+      typedef CR HeapCrossRef;
+      typedef H Heap;
+      static HeapCrossRef *createHeapCrossRef(const Digraph &) {
+     	LEMON_ASSERT(false,"Uninitialized parameter.");
+	return 0;
+      }
+      static Heap *createHeap(HeapCrossRef &) {
+       	LEMON_ASSERT(false,"Uninitialized parameter.");
+	return 0;
+      }
+    };
+    /// \brief \ref named-templ-param "Named parameter" for setting heap 
+    /// and cross reference type
+    ///
+    /// \ref named-templ-param "Named parameter" for setting heap and cross 
+    /// reference type for the algorithm.
+    template <class H, class CR = typename Digraph::template NodeMap<int> >
+    struct SetHeap
+      : public MaxCardinalitySearch<Digraph, CapacityMap, 
+                                    DefHeapTraits<H, CR> > { 
+      typedef MaxCardinalitySearch< Digraph, CapacityMap, 
+                                    DefHeapTraits<H, CR> > Create;
+    };
+
+    template <class H, class CR>
+    struct DefStandardHeapTraits : public Traits {
+      typedef CR HeapCrossRef;
+      typedef H Heap;
+      static HeapCrossRef *createHeapCrossRef(const Digraph &digraph) {
+	return new HeapCrossRef(digraph);
+      }
+      static Heap *createHeap(HeapCrossRef &crossref) {
+	return new Heap(crossref);
+      }
+    };
+
+    /// \brief \ref named-templ-param "Named parameter" for setting heap and 
+    /// cross reference type with automatic allocation
+    ///
+    /// \ref named-templ-param "Named parameter" for setting heap and cross 
+    /// reference type. It can allocate the heap and the cross reference 
+    /// object if the cross reference's constructor waits for the digraph as 
+    /// parameter and the heap's constructor waits for the cross reference.
+    template <class H, class CR = typename Digraph::template NodeMap<int> >
+    struct SetStandardHeap
+      : public MaxCardinalitySearch<Digraph, CapacityMap, 
+                                    DefStandardHeapTraits<H, CR> > { 
+      typedef MaxCardinalitySearch<Digraph, CapacityMap, 
+                                   DefStandardHeapTraits<H, CR> > 
+      Create;
+    };
+    
+    ///@}
+
+
+  protected:
+
+    MaxCardinalitySearch() {}
+
+  public:      
+    
+    /// \brief Constructor.
+    ///
+    ///\param digraph the digraph the algorithm will run on.
+    ///\param capacity the capacity map used by the algorithm.
+    MaxCardinalitySearch(const Digraph& digraph,
+			 const CapacityMap& capacity) :
+      _graph(&digraph),
+      _capacity(&capacity), local_capacity(false),
+      _cardinality(0), local_cardinality(false),
+      _processed(0), local_processed(false),
+      _heap_cross_ref(0), local_heap_cross_ref(false),
+      _heap(0), local_heap(false)
+    { }
+
+    /// \brief Constructor.
+    ///
+    ///\param digraph the digraph the algorithm will run on.
+    ///
+    ///A constant 1 capacity map will be allocated.
+    MaxCardinalitySearch(const Digraph& digraph) :
+      _graph(&digraph),
+      _capacity(0), local_capacity(false),
+      _cardinality(0), local_cardinality(false),
+      _processed(0), local_processed(false),
+      _heap_cross_ref(0), local_heap_cross_ref(false),
+      _heap(0), local_heap(false)
+    { }
+
+    /// \brief Destructor.
+    ~MaxCardinalitySearch() {
+      if(local_capacity) delete _capacity;
+      if(local_cardinality) delete _cardinality;
+      if(local_processed) delete _processed;
+      if(local_heap_cross_ref) delete _heap_cross_ref;
+      if(local_heap) delete _heap;
+    }
+
+    /// \brief Sets the capacity map.
+    ///
+    /// Sets the capacity map.
+    /// \return <tt> (*this) </tt>
+    MaxCardinalitySearch &capacityMap(const CapacityMap &m) {
+      if (local_capacity) {
+	delete _capacity;
+	local_capacity=false;
+      }
+      _capacity=&m;
+      return *this;
+    }
+
+    /// \brief Returns a const reference to the capacity map.
+    ///
+    /// Returns a const reference to the capacity map used by
+    /// the algorithm.
+    const CapacityMap &capacityMap() const {
+      return *_capacity;
+    }
+
+    /// \brief Sets the map storing the cardinalities calculated by the 
+    /// algorithm.
+    ///
+    /// Sets the map storing the cardinalities calculated by the algorithm.
+    /// If you don't use this function before calling \ref run(),
+    /// it will allocate one. The destructor deallocates this
+    /// automatically allocated map, of course.
+    /// \return <tt> (*this) </tt>
+    MaxCardinalitySearch &cardinalityMap(CardinalityMap &m) {
+      if(local_cardinality) {
+	delete _cardinality;
+	local_cardinality=false;
+      }
+      _cardinality = &m;
+      return *this;
+    }
+
+    /// \brief Sets the map storing the processed nodes.
+    ///
+    /// Sets the map storing the processed nodes.
+    /// If you don't use this function before calling \ref run(),
+    /// it will allocate one. The destructor deallocates this
+    /// automatically allocated map, of course.
+    /// \return <tt> (*this) </tt>
+    MaxCardinalitySearch &processedMap(ProcessedMap &m) 
+    {
+      if(local_processed) {
+	delete _processed;
+	local_processed=false;
+      }
+      _processed = &m;
+      return *this;
+    }
+
+    /// \brief Returns a const reference to the processed map.
+    ///
+    /// Returns a const reference to the processed map used by
+    /// the algorithm.
+    const ProcessedMap &processedMap() const {
+      return *_processed;
+    }
+
+    /// \brief Sets the heap and the cross reference used by algorithm.
+    ///
+    /// Sets the heap and the cross reference used by algorithm.
+    /// If you don't use this function before calling \ref run(),
+    /// it will allocate one. The destructor deallocates this
+    /// automatically allocated map, of course.
+    /// \return <tt> (*this) </tt>
+    MaxCardinalitySearch &heap(Heap& hp, HeapCrossRef &cr) {
+      if(local_heap_cross_ref) {
+	delete _heap_cross_ref;
+	local_heap_cross_ref = false;
+      }
+      _heap_cross_ref = &cr;
+      if(local_heap) {
+	delete _heap;
+	local_heap = false;
+      }
+      _heap = &hp;
+      return *this;
+    }
+
+    /// \brief Returns a const reference to the heap.
+    ///
+    /// Returns a const reference to the heap used by
+    /// the algorithm.
+    const Heap &heap() const {
+      return *_heap;
+    }
+
+    /// \brief Returns a const reference to the cross reference.
+    ///
+    /// Returns a const reference to the cross reference
+    /// of the heap.
+    const HeapCrossRef &heapCrossRef() const {
+      return *_heap_cross_ref;
+    }
+
+  private:
+
+    typedef typename Digraph::Node Node;
+    typedef typename Digraph::NodeIt NodeIt;
+    typedef typename Digraph::Arc Arc;
+    typedef typename Digraph::InArcIt InArcIt;
+
+    void create_maps() {
+      if(!_capacity) {
+	local_capacity = true;
+	_capacity = Traits::createCapacityMap(*_graph);
+      }
+      if(!_cardinality) {
+	local_cardinality = true;
+	_cardinality = Traits::createCardinalityMap(*_graph);
+      }
+      if(!_processed) {
+	local_processed = true;
+	_processed = Traits::createProcessedMap(*_graph);
+      }
+      if (!_heap_cross_ref) {
+	local_heap_cross_ref = true;
+	_heap_cross_ref = Traits::createHeapCrossRef(*_graph);
+      }
+      if (!_heap) {
+	local_heap = true;
+	_heap = Traits::createHeap(*_heap_cross_ref);
+      }
+    }
+    
+    void finalizeNodeData(Node node, Value capacity) {
+      _processed->set(node, true);
+      _cardinality->set(node, capacity);
+    }
+
+  public:
+    /// \name Execution control
+    /// The simplest way to execute the algorithm is to use
+    /// one of the member functions called \ref run().
+    /// \n
+    /// If you need more control on the execution,
+    /// first you must call \ref init(), then you can add several source nodes
+    /// with \ref addSource().
+    /// Finally \ref start() will perform the computation.
+
+    ///@{
+
+    /// \brief Initializes the internal data structures.
+    ///
+    /// Initializes the internal data structures, and clears the heap.
+    void init() {
+      create_maps();
+      _heap->clear();
+      for (NodeIt it(*_graph) ; it != INVALID ; ++it) {
+	_processed->set(it, false);
+	_heap_cross_ref->set(it, Heap::PRE_HEAP);
+      }
+    }
+    
+    /// \brief Adds a new source node.
+    /// 
+    /// Adds a new source node to the priority heap.
+    ///
+    /// It checks if the node has not yet been added to the heap.
+    void addSource(Node source, Value capacity = 0) {
+      if(_heap->state(source) == Heap::PRE_HEAP) {
+	_heap->push(source, capacity);
+      } 
+    }
+    
+    /// \brief Processes the next node in the priority heap
+    ///
+    /// Processes the next node in the priority heap.
+    ///
+    /// \return The processed node.
+    ///
+    /// \warning The priority heap must not be empty!
+    Node processNextNode() {
+      Node node = _heap->top(); 
+      finalizeNodeData(node, _heap->prio());
+      _heap->pop();
+      
+      for (InArcIt it(*_graph, node); it != INVALID; ++it) {
+	Node source = _graph->source(it);
+	switch (_heap->state(source)) {
+	case Heap::PRE_HEAP:
+	  _heap->push(source, (*_capacity)[it]);
+	  break;
+	case Heap::IN_HEAP:
+	  _heap->decrease(source, (*_heap)[source] + (*_capacity)[it]);
+	  break;
+	case Heap::POST_HEAP:
+	  break;
+	}
+      }
+      return node;
+    }
+
+    /// \brief Next node to be processed.
+    ///
+    /// Next node to be processed.
+    ///
+    /// \return The next node to be processed or INVALID if the 
+    /// priority heap is empty.
+    Node nextNode() { 
+      return !_heap->empty() ? _heap->top() : INVALID;
+    }
+ 
+    /// \brief Returns \c false if there are nodes
+    /// to be processed in the priority heap
+    ///
+    /// Returns \c false if there are nodes
+    /// to be processed in the priority heap
+    bool emptyQueue() { return _heap->empty(); }
+    /// \brief Returns the number of nodes to be processed 
+    /// in the priority heap
+    ///
+    /// Returns the number of nodes to be processed in the priority heap
+    int emptySize() { return _heap->size(); }
+    
+    /// \brief Executes the algorithm.
+    ///
+    /// Executes the algorithm.
+    ///
+    ///\pre init() must be called and at least one node should be added
+    /// with addSource() before using this function.
+    ///
+    /// This method runs the Maximum Cardinality Search algorithm from the 
+    /// source node(s).
+    void start() {
+      while ( !_heap->empty() ) processNextNode();
+    }
+    
+    /// \brief Executes the algorithm until \c dest is reached.
+    ///
+    /// Executes the algorithm until \c dest is reached.
+    ///
+    /// \pre init() must be called and at least one node should be added
+    /// with addSource() before using this function.
+    ///
+    /// This method runs the %MaxCardinalitySearch algorithm from the source 
+    /// nodes.
+    void start(Node dest) {
+      while ( !_heap->empty() && _heap->top()!=dest ) processNextNode();
+      if ( !_heap->empty() ) finalizeNodeData(_heap->top(), _heap->prio());
+    }
+    
+    /// \brief Executes the algorithm until a condition is met.
+    ///
+    /// Executes the algorithm until a condition is met.
+    ///
+    /// \pre init() must be called and at least one node should be added
+    /// with addSource() before using this function.
+    ///
+    /// \param nm must be a bool (or convertible) node map. The algorithm
+    /// will stop when it reaches a node \c v with <tt>nm[v]==true</tt>.
+    template <typename NodeBoolMap>
+    void start(const NodeBoolMap &nm) {
+      while ( !_heap->empty() && !nm[_heap->top()] ) processNextNode();
+      if ( !_heap->empty() ) finalizeNodeData(_heap->top(),_heap->prio());
+    }
+    
+    /// \brief Runs the maximum cardinality search algorithm from node \c s.
+    ///
+    /// This method runs the %MaxCardinalitySearch algorithm from a root 
+    /// node \c s.
+    ///
+    ///\note d.run(s) is just a shortcut of the following code.
+    ///\code
+    ///  d.init();
+    ///  d.addSource(s);
+    ///  d.start();
+    ///\endcode
+    void run(Node s) {
+      init();
+      addSource(s);
+      start();
+    }
+
+    /// \brief Runs the maximum cardinality search algorithm for the 
+    /// whole digraph.
+    ///
+    /// This method runs the %MaxCardinalitySearch algorithm from all 
+    /// unprocessed nodes of the digraph.
+    ///
+    ///\note d.run() is just a shortcut of the following code.
+    ///\code
+    ///  d.init();
+    ///  for (NodeIt it(digraph); it != INVALID; ++it) {
+    ///    if (!d.reached(it)) {
+    ///      d.addSource(it);
+    ///      d.start();
+    ///    }
+    ///  }
+    ///\endcode
+    void run() {
+      init();
+      for (NodeIt it(*_graph); it != INVALID; ++it) {
+        if (!reached(it)) {
+          addSource(it);
+          start();
+        }
+      }
+    }
+    
+    ///@}
+
+    /// \name Query Functions
+    /// The results of the maximum cardinality search algorithm can be 
+    /// obtained using these functions.
+    /// \n
+    /// Before the use of these functions, either run() or start() must be 
+    /// called.
+    
+    ///@{
+
+    /// \brief The cardinality of a node.
+    ///
+    /// Returns the cardinality of a node.
+    /// \pre \ref run() must be called before using this function.
+    /// \warning If node \c v is unreachable from the root the return value
+    /// of this function is undefined.
+    Value cardinality(Node node) const { return (*_cardinality)[node]; }
+
+    /// \brief The current cardinality of a node.
+    ///
+    /// Returns the current cardinality of a node.
+    /// \pre the given node should be reached but not processed
+    Value currentCardinality(Node node) const { return (*_heap)[node]; }
+
+    /// \brief Returns a reference to the NodeMap of cardinalities.
+    ///
+    /// Returns a reference to the NodeMap of cardinalities. \pre \ref run() 
+    /// must be called before using this function.
+    const CardinalityMap &cardinalityMap() const { return *_cardinality;}
+ 
+    /// \brief Checks if a node is reachable from the root.
+    ///
+    /// Returns \c true if \c v is reachable from the root.
+    /// \warning The source nodes are initiated as unreached.
+    /// \pre \ref run() must be called before using this function.
+    bool reached(Node v) { return (*_heap_cross_ref)[v] != Heap::PRE_HEAP; }
+
+    /// \brief Checks if a node is processed.
+    ///
+    /// Returns \c true if \c v is processed, i.e. the shortest
+    /// path to \c v has already been found.
+    /// \pre \ref run() must be called before using this function.
+    bool processed(Node v) { return (*_heap_cross_ref)[v] == Heap::POST_HEAP; }
+    
+    ///@}
+  };
+
+}
+
+#endif
Index: lemon/nagamochi_ibaraki.h
===================================================================
--- lemon/nagamochi_ibaraki.h	(revision 978)
+++ lemon/nagamochi_ibaraki.h	(revision 978)
@@ -0,0 +1,702 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#ifndef LEMON_NAGAMOCHI_IBARAKI_H
+#define LEMON_NAGAMOCHI_IBARAKI_H
+
+
+/// \ingroup min_cut
+/// \file
+/// \brief Implementation of the Nagamochi-Ibaraki algorithm.
+
+#include <lemon/core.h>
+#include <lemon/bin_heap.h>
+#include <lemon/bucket_heap.h>
+#include <lemon/maps.h>
+#include <lemon/radix_sort.h>
+#include <lemon/unionfind.h>
+
+#include <cassert>
+
+namespace lemon {
+
+  /// \brief Default traits class for NagamochiIbaraki class.
+  ///
+  /// Default traits class for NagamochiIbaraki class.
+  /// \param GR The undirected graph type.
+  /// \param CM Type of capacity map.
+  template <typename GR, typename CM>
+  struct NagamochiIbarakiDefaultTraits {
+    /// The type of the capacity map.
+    typedef typename CM::Value Value;
+
+    /// The undirected graph type the algorithm runs on.
+    typedef GR Graph;
+
+    /// \brief The type of the map that stores the edge capacities.
+    ///
+    /// The type of the map that stores the edge capacities.
+    /// It must meet the \ref concepts::ReadMap "ReadMap" concept.
+    typedef CM CapacityMap;
+
+    /// \brief Instantiates a CapacityMap.
+    ///
+    /// This function instantiates a \ref CapacityMap.
+#ifdef DOXYGEN
+    static CapacityMap *createCapacityMap(const Graph& graph)
+#else
+    static CapacityMap *createCapacityMap(const Graph&)
+#endif
+    {
+        LEMON_ASSERT(false, "CapacityMap is not initialized");
+        return 0; // ignore warnings
+    }
+
+    /// \brief The cross reference type used by heap.
+    ///
+    /// The cross reference type used by heap.
+    /// Usually \c Graph::NodeMap<int>.
+    typedef typename Graph::template NodeMap<int> HeapCrossRef;
+
+    /// \brief Instantiates a HeapCrossRef.
+    ///
+    /// This function instantiates a \ref HeapCrossRef.
+    /// \param g is the graph, to which we would like to define the
+    /// \ref HeapCrossRef.
+    static HeapCrossRef *createHeapCrossRef(const Graph& g) {
+      return new HeapCrossRef(g);
+    }
+
+    /// \brief The heap type used by NagamochiIbaraki algorithm.
+    ///
+    /// The heap type used by NagamochiIbaraki algorithm. It has to
+    /// maximize the priorities.
+    ///
+    /// \sa BinHeap
+    /// \sa NagamochiIbaraki
+    typedef BinHeap<Value, HeapCrossRef, std::greater<Value> > Heap;
+
+    /// \brief Instantiates a Heap.
+    ///
+    /// This function instantiates a \ref Heap.
+    /// \param r is the cross reference of the heap.
+    static Heap *createHeap(HeapCrossRef& r) {
+      return new Heap(r);
+    }
+  };
+
+  /// \ingroup min_cut
+  ///
+  /// \brief Calculates the minimum cut in an undirected graph.
+  ///
+  /// Calculates the minimum cut in an undirected graph with the
+  /// Nagamochi-Ibaraki algorithm. The algorithm separates the graph's
+  /// nodes into two partitions with the minimum sum of edge capacities
+  /// between the two partitions. The algorithm can be used to test
+  /// the network reliability, especially to test how many links have
+  /// to be destroyed in the network to split it to at least two
+  /// distinct subnetworks.
+  ///
+  /// The complexity of the algorithm is \f$ O(nm\log(n)) \f$ but with
+  /// \ref FibHeap "Fibonacci heap" it can be decreased to
+  /// \f$ O(nm+n^2\log(n)) \f$.  When the edges have unit capacities,
+  /// \c BucketHeap can be used which yields \f$ O(nm) \f$ time
+  /// complexity.
+  ///
+  /// \warning The value type of the capacity map should be able to
+  /// hold any cut value of the graph, otherwise the result can
+  /// overflow.
+  /// \note The capacity values are supposed to be of an integer type.
+#ifdef DOXYGEN
+  template <typename GR, typename CM, typename TR>
+#else
+  template <typename GR,
+            typename CM = typename GR::template EdgeMap<int>,
+            typename TR = NagamochiIbarakiDefaultTraits<GR, CM> >
+#endif
+  class NagamochiIbaraki {
+  public:
+
+    typedef TR Traits;
+    /// The type of the underlying graph.
+    typedef typename Traits::Graph Graph;
+
+    /// The type of the capacity map.
+    typedef typename Traits::CapacityMap CapacityMap;
+    /// The value type of the capacity map.
+    typedef typename Traits::CapacityMap::Value Value;
+
+    /// The heap type used by the algorithm.
+    typedef typename Traits::Heap Heap;
+    /// The cross reference type used for the heap.
+    typedef typename Traits::HeapCrossRef HeapCrossRef;
+
+    ///\name Named template parameters
+
+    ///@{
+
+    struct SetUnitCapacityTraits : public Traits {
+      typedef ConstMap<typename Graph::Edge, Const<int, 1> > CapacityMap;
+      static CapacityMap *createCapacityMap(const Graph&) {
+        return new CapacityMap();
+      }
+    };
+
+    /// \brief \ref named-templ-param "Named parameter" for setting
+    /// the capacity map to a constMap<Edge, int, 1>() instance
+    ///
+    /// \ref named-templ-param "Named parameter" for setting
+    /// the capacity map to a constMap<Edge, int, 1>() instance
+    struct SetUnitCapacity
+      : public NagamochiIbaraki<Graph, CapacityMap,
+                                SetUnitCapacityTraits> {
+      typedef NagamochiIbaraki<Graph, CapacityMap,
+                               SetUnitCapacityTraits> Create;
+    };
+
+
+    template <class H, class CR>
+    struct SetHeapTraits : public Traits {
+      typedef CR HeapCrossRef;
+      typedef H Heap;
+      static HeapCrossRef *createHeapCrossRef(int num) {
+        LEMON_ASSERT(false, "HeapCrossRef is not initialized");
+        return 0; // ignore warnings
+      }
+      static Heap *createHeap(HeapCrossRef &) {
+        LEMON_ASSERT(false, "Heap is not initialized");
+        return 0; // ignore warnings
+      }
+    };
+
+    /// \brief \ref named-templ-param "Named parameter" for setting
+    /// heap and cross reference type
+    ///
+    /// \ref named-templ-param "Named parameter" for setting heap and
+    /// cross reference type. The heap has to maximize the priorities.
+    template <class H, class CR = RangeMap<int> >
+    struct SetHeap
+      : public NagamochiIbaraki<Graph, CapacityMap, SetHeapTraits<H, CR> > {
+      typedef NagamochiIbaraki< Graph, CapacityMap, SetHeapTraits<H, CR> >
+      Create;
+    };
+
+    template <class H, class CR>
+    struct SetStandardHeapTraits : public Traits {
+      typedef CR HeapCrossRef;
+      typedef H Heap;
+      static HeapCrossRef *createHeapCrossRef(int size) {
+        return new HeapCrossRef(size);
+      }
+      static Heap *createHeap(HeapCrossRef &crossref) {
+        return new Heap(crossref);
+      }
+    };
+
+    /// \brief \ref named-templ-param "Named parameter" for setting
+    /// heap and cross reference type with automatic allocation
+    ///
+    /// \ref named-templ-param "Named parameter" for setting heap and
+    /// cross reference type with automatic allocation. They should
+    /// have standard constructor interfaces to be able to
+    /// automatically created by the algorithm (i.e. the graph should
+    /// be passed to the constructor of the cross reference and the
+    /// cross reference should be passed to the constructor of the
+    /// heap). However, external heap and cross reference objects
+    /// could also be passed to the algorithm using the \ref heap()
+    /// function before calling \ref run() or \ref init(). The heap
+    /// has to maximize the priorities.
+    /// \sa SetHeap
+    template <class H, class CR = RangeMap<int> >
+    struct SetStandardHeap
+      : public NagamochiIbaraki<Graph, CapacityMap,
+                                SetStandardHeapTraits<H, CR> > {
+      typedef NagamochiIbaraki<Graph, CapacityMap,
+                               SetStandardHeapTraits<H, CR> > Create;
+    };
+
+    ///@}
+
+
+  private:
+
+    const Graph &_graph;
+    const CapacityMap *_capacity;
+    bool _local_capacity; // unit capacity
+
+    struct ArcData {
+      typename Graph::Node target;
+      int prev, next;
+    };
+    struct EdgeData {
+      Value capacity;
+      Value cut;
+    };
+
+    struct NodeData {
+      int first_arc;
+      typename Graph::Node prev, next;
+      int curr_arc;
+      typename Graph::Node last_rep;
+      Value sum;
+    };
+
+    typename Graph::template NodeMap<NodeData> *_nodes;
+    std::vector<ArcData> _arcs;
+    std::vector<EdgeData> _edges;
+
+    typename Graph::Node _first_node;
+    int _node_num;
+
+    Value _min_cut;
+
+    HeapCrossRef *_heap_cross_ref;
+    bool _local_heap_cross_ref;
+    Heap *_heap;
+    bool _local_heap;
+
+    typedef typename Graph::template NodeMap<typename Graph::Node> NodeList;
+    NodeList *_next_rep;
+
+    typedef typename Graph::template NodeMap<bool> MinCutMap;
+    MinCutMap *_cut_map;
+
+    void createStructures() {
+      if (!_nodes) {
+        _nodes = new (typename Graph::template NodeMap<NodeData>)(_graph);
+      }
+      if (!_capacity) {
+        _local_capacity = true;
+        _capacity = Traits::createCapacityMap(_graph);
+      }
+      if (!_heap_cross_ref) {
+        _local_heap_cross_ref = true;
+        _heap_cross_ref = Traits::createHeapCrossRef(_graph);
+      }
+      if (!_heap) {
+        _local_heap = true;
+        _heap = Traits::createHeap(*_heap_cross_ref);
+      }
+      if (!_next_rep) {
+        _next_rep = new NodeList(_graph);
+      }
+      if (!_cut_map) {
+        _cut_map = new MinCutMap(_graph);
+      }
+    }
+
+  protected:
+    //This is here to avoid a gcc-3.3 compilation error.
+    //It should never be called.
+    NagamochiIbaraki() {} 
+    
+  public:
+
+    typedef NagamochiIbaraki Create;
+
+
+    /// \brief Constructor.
+    ///
+    /// \param graph The graph the algorithm runs on.
+    /// \param capacity The capacity map used by the algorithm.
+    NagamochiIbaraki(const Graph& graph, const CapacityMap& capacity)
+      : _graph(graph), _capacity(&capacity), _local_capacity(false),
+        _nodes(0), _arcs(), _edges(), _min_cut(),
+        _heap_cross_ref(0), _local_heap_cross_ref(false),
+        _heap(0), _local_heap(false),
+        _next_rep(0), _cut_map(0) {}
+
+    /// \brief Constructor.
+    ///
+    /// This constructor can be used only when the Traits class
+    /// defines how the local capacity map can be instantiated.
+    /// If the SetUnitCapacity parameter is used, the algorithm
+    /// automatically constructs the capacity map.
+    ///
+    ///\param graph The graph the algorithm runs on.
+    NagamochiIbaraki(const Graph& graph)
+      : _graph(graph), _capacity(0), _local_capacity(false),
+        _nodes(0), _arcs(), _edges(), _min_cut(),
+        _heap_cross_ref(0), _local_heap_cross_ref(false),
+        _heap(0), _local_heap(false),
+        _next_rep(0), _cut_map(0) {}
+
+    /// \brief Destructor.
+    ///
+    /// Destructor.
+    ~NagamochiIbaraki() {
+      if (_local_capacity) delete _capacity;
+      if (_nodes) delete _nodes;
+      if (_local_heap) delete _heap;
+      if (_local_heap_cross_ref) delete _heap_cross_ref;
+      if (_next_rep) delete _next_rep;
+      if (_cut_map) delete _cut_map;
+    }
+
+    /// \brief Sets the heap and the cross reference used by algorithm.
+    ///
+    /// Sets the heap and the cross reference used by algorithm.
+    /// If you don't use this function before calling \ref run(),
+    /// it will allocate one. The destructor deallocates this
+    /// automatically allocated heap and cross reference, of course.
+    /// \return <tt> (*this) </tt>
+    NagamochiIbaraki &heap(Heap& hp, HeapCrossRef &cr)
+    {
+      if (_local_heap_cross_ref) {
+        delete _heap_cross_ref;
+        _local_heap_cross_ref = false;
+      }
+      _heap_cross_ref = &cr;
+      if (_local_heap) {
+        delete _heap;
+        _local_heap = false;
+      }
+      _heap = &hp;
+      return *this;
+    }
+
+    /// \name Execution control
+    /// The simplest way to execute the algorithm is to use
+    /// one of the member functions called \c run().
+    /// \n
+    /// If you need more control on the execution,
+    /// first you must call \ref init() and then call start()
+    /// or call processNextPhase() the proper number of times.
+
+    ///@{
+
+    /// \brief Initializes the internal data structures.
+    ///
+    /// Initializes the internal data structures.
+    void init() {
+      createStructures();
+
+      int edge_num = countEdges(_graph);
+      _edges.resize(edge_num);
+      _arcs.resize(2 * edge_num);
+
+      typename Graph::Node prev = INVALID;
+      _node_num = 0;
+      for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+        (*_cut_map)[n] = false;
+        (*_next_rep)[n] = INVALID;
+        (*_nodes)[n].last_rep = n;
+        (*_nodes)[n].first_arc = -1;
+        (*_nodes)[n].curr_arc = -1;
+        (*_nodes)[n].prev = prev;
+        if (prev != INVALID) {
+          (*_nodes)[prev].next = n;
+        }
+        (*_nodes)[n].next = INVALID;
+        (*_nodes)[n].sum = 0;
+        prev = n;
+        ++_node_num;
+      }
+
+      _first_node = typename Graph::NodeIt(_graph);
+
+      int index = 0;
+      for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+        for (typename Graph::OutArcIt a(_graph, n); a != INVALID; ++a) {
+          typename Graph::Node m = _graph.target(a);
+          
+          if (!(n < m)) continue;
+
+          (*_nodes)[n].sum += (*_capacity)[a];
+          (*_nodes)[m].sum += (*_capacity)[a];
+          
+          int c = (*_nodes)[m].curr_arc;
+          if (c != -1 && _arcs[c ^ 1].target == n) {
+            _edges[c >> 1].capacity += (*_capacity)[a];
+          } else {
+            _edges[index].capacity = (*_capacity)[a];
+            
+            _arcs[index << 1].prev = -1;
+            if ((*_nodes)[n].first_arc != -1) {
+              _arcs[(*_nodes)[n].first_arc].prev = (index << 1);
+            }
+            _arcs[index << 1].next = (*_nodes)[n].first_arc;
+            (*_nodes)[n].first_arc = (index << 1);
+            _arcs[index << 1].target = m;
+
+            (*_nodes)[m].curr_arc = (index << 1);
+            
+            _arcs[(index << 1) | 1].prev = -1;
+            if ((*_nodes)[m].first_arc != -1) {
+              _arcs[(*_nodes)[m].first_arc].prev = ((index << 1) | 1);
+            }
+            _arcs[(index << 1) | 1].next = (*_nodes)[m].first_arc;
+            (*_nodes)[m].first_arc = ((index << 1) | 1);
+            _arcs[(index << 1) | 1].target = n;
+            
+            ++index;
+          }
+        }
+      }
+
+      typename Graph::Node cut_node = INVALID;
+      _min_cut = std::numeric_limits<Value>::max();
+
+      for (typename Graph::Node n = _first_node; 
+           n != INVALID; n = (*_nodes)[n].next) {
+        if ((*_nodes)[n].sum < _min_cut) {
+          cut_node = n;
+          _min_cut = (*_nodes)[n].sum;
+        }
+      }
+      (*_cut_map)[cut_node] = true;
+      if (_min_cut == 0) {
+        _first_node = INVALID;
+      }
+    }
+
+  public:
+
+    /// \brief Processes the next phase
+    ///
+    /// Processes the next phase in the algorithm. It must be called
+    /// at most one less than the number of nodes in the graph.
+    ///
+    ///\return %True when the algorithm is finished.
+    bool processNextPhase() {
+      if (_first_node == INVALID) return true;
+
+      _heap->clear();
+      for (typename Graph::Node n = _first_node; 
+           n != INVALID; n = (*_nodes)[n].next) {
+        (*_heap_cross_ref)[n] = Heap::PRE_HEAP;
+      }
+
+      std::vector<typename Graph::Node> order;
+      order.reserve(_node_num);
+      int sep = 0;
+
+      Value alpha = 0;
+      Value pmc = std::numeric_limits<Value>::max();
+
+      _heap->push(_first_node, static_cast<Value>(0));
+      while (!_heap->empty()) {
+        typename Graph::Node n = _heap->top();
+        Value v = _heap->prio();
+
+        _heap->pop();
+        for (int a = (*_nodes)[n].first_arc; a != -1; a = _arcs[a].next) {
+          switch (_heap->state(_arcs[a].target)) {
+          case Heap::PRE_HEAP: 
+            {
+              Value nv = _edges[a >> 1].capacity;
+              _heap->push(_arcs[a].target, nv);
+              _edges[a >> 1].cut = nv;
+            } break;
+          case Heap::IN_HEAP:
+            {
+              Value nv = _edges[a >> 1].capacity + (*_heap)[_arcs[a].target];
+              _heap->decrease(_arcs[a].target, nv);
+              _edges[a >> 1].cut = nv;
+            } break;
+          case Heap::POST_HEAP:
+            break;
+          }
+        }
+
+        alpha += (*_nodes)[n].sum;
+        alpha -= 2 * v;
+
+        order.push_back(n);
+        if (!_heap->empty()) {
+          if (alpha < pmc) {
+            pmc = alpha;
+            sep = order.size();
+          }
+        }
+      }
+
+      if (static_cast<int>(order.size()) < _node_num) {
+        _first_node = INVALID;
+        for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+          (*_cut_map)[n] = false;
+        }
+        for (int i = 0; i < static_cast<int>(order.size()); ++i) {
+          typename Graph::Node n = order[i];
+          while (n != INVALID) {
+            (*_cut_map)[n] = true;
+            n = (*_next_rep)[n];
+          }
+        }
+        _min_cut = 0;
+        return true;
+      }
+
+      if (pmc < _min_cut) {
+        for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+          (*_cut_map)[n] = false;
+        }
+        for (int i = 0; i < sep; ++i) {
+          typename Graph::Node n = order[i];
+          while (n != INVALID) {
+            (*_cut_map)[n] = true;
+            n = (*_next_rep)[n];
+          }
+        }
+        _min_cut = pmc;
+      }
+
+      for (typename Graph::Node n = _first_node;
+           n != INVALID; n = (*_nodes)[n].next) {
+        bool merged = false;
+        for (int a = (*_nodes)[n].first_arc; a != -1; a = _arcs[a].next) {
+          if (!(_edges[a >> 1].cut < pmc)) {
+            if (!merged) {
+              for (int b = (*_nodes)[n].first_arc; b != -1; b = _arcs[b].next) {
+                (*_nodes)[_arcs[b].target].curr_arc = b;          
+              }
+              merged = true;
+            }
+            typename Graph::Node m = _arcs[a].target;
+            int nb = 0;
+            for (int b = (*_nodes)[m].first_arc; b != -1; b = nb) {
+              nb = _arcs[b].next;
+              if ((b ^ a) == 1) continue;
+              typename Graph::Node o = _arcs[b].target;
+              int c = (*_nodes)[o].curr_arc; 
+              if (c != -1 && _arcs[c ^ 1].target == n) {
+                _edges[c >> 1].capacity += _edges[b >> 1].capacity;
+                (*_nodes)[n].sum += _edges[b >> 1].capacity;
+                if (_edges[b >> 1].cut < _edges[c >> 1].cut) {
+                  _edges[b >> 1].cut = _edges[c >> 1].cut;
+                }
+                if (_arcs[b ^ 1].prev != -1) {
+                  _arcs[_arcs[b ^ 1].prev].next = _arcs[b ^ 1].next;
+                } else {
+                  (*_nodes)[o].first_arc = _arcs[b ^ 1].next;
+                }
+                if (_arcs[b ^ 1].next != -1) {
+                  _arcs[_arcs[b ^ 1].next].prev = _arcs[b ^ 1].prev;
+                }
+              } else {
+                if (_arcs[a].next != -1) {
+                  _arcs[_arcs[a].next].prev = b;
+                }
+                _arcs[b].next = _arcs[a].next;
+                _arcs[b].prev = a;
+                _arcs[a].next = b;
+                _arcs[b ^ 1].target = n;
+
+                (*_nodes)[n].sum += _edges[b >> 1].capacity;
+                (*_nodes)[o].curr_arc = b;
+              }
+            }
+
+            if (_arcs[a].prev != -1) {
+              _arcs[_arcs[a].prev].next = _arcs[a].next;
+            } else {
+              (*_nodes)[n].first_arc = _arcs[a].next;
+            }            
+            if (_arcs[a].next != -1) {
+              _arcs[_arcs[a].next].prev = _arcs[a].prev;
+            }
+
+            (*_nodes)[n].sum -= _edges[a >> 1].capacity;
+            (*_next_rep)[(*_nodes)[n].last_rep] = m;
+            (*_nodes)[n].last_rep = (*_nodes)[m].last_rep;
+            
+            if ((*_nodes)[m].prev != INVALID) {
+              (*_nodes)[(*_nodes)[m].prev].next = (*_nodes)[m].next;
+            } else{
+              _first_node = (*_nodes)[m].next;
+            }
+            if ((*_nodes)[m].next != INVALID) {
+              (*_nodes)[(*_nodes)[m].next].prev = (*_nodes)[m].prev;
+            }
+            --_node_num;
+          }
+        }
+      }
+
+      if (_node_num == 1) {
+        _first_node = INVALID;
+        return true;
+      }
+
+      return false;
+    }
+
+    /// \brief Executes the algorithm.
+    ///
+    /// Executes the algorithm.
+    ///
+    /// \pre init() must be called
+    void start() {
+      while (!processNextPhase()) {}
+    }
+
+
+    /// \brief Runs %NagamochiIbaraki algorithm.
+    ///
+    /// This method runs the %Min cut algorithm
+    ///
+    /// \note mc.run(s) is just a shortcut of the following code.
+    ///\code
+    ///  mc.init();
+    ///  mc.start();
+    ///\endcode
+    void run() {
+      init();
+      start();
+    }
+
+    ///@}
+
+    /// \name Query Functions
+    ///
+    /// The result of the %NagamochiIbaraki
+    /// algorithm can be obtained using these functions.\n
+    /// Before the use of these functions, either run() or start()
+    /// must be called.
+
+    ///@{
+
+    /// \brief Returns the min cut value.
+    ///
+    /// Returns the min cut value if the algorithm finished.
+    /// After the first processNextPhase() it is a value of a
+    /// valid cut in the graph.
+    Value minCutValue() const {
+      return _min_cut;
+    }
+
+    /// \brief Returns a min cut in a NodeMap.
+    ///
+    /// It sets the nodes of one of the two partitions to true and
+    /// the other partition to false.
+    /// \param cutMap A \ref concepts::WriteMap "writable" node map with
+    /// \c bool (or convertible) value type.
+    template <typename CutMap>
+    Value minCutMap(CutMap& cutMap) const {
+      for (typename Graph::NodeIt n(_graph); n != INVALID; ++n) {
+        cutMap.set(n, (*_cut_map)[n]);
+      }
+      return minCutValue();
+    }
+
+    ///@}
+
+  };
+}
+
+#endif
Index: lemon/network_simplex.h
===================================================================
--- lemon/network_simplex.h	(revision 889)
+++ lemon/network_simplex.h	(revision 1004)
@@ -48,8 +48,9 @@
   /// flow problem.
   ///
-  /// In general, %NetworkSimplex is the fastest implementation available
-  /// in LEMON for this problem.
-  /// Moreover, it supports both directions of the supply/demand inequality
-  /// constraints. For more information, see \ref SupplyType.
+  /// In general, \ref NetworkSimplex and \ref CostScaling are the fastest
+  /// implementations available in LEMON for solving this problem.
+  /// (For more information, see \ref min_cost_flow_algs "the module page".)
+  /// Furthermore, this class supports both directions of the supply/demand
+  /// inequality constraints. For more information, see \ref SupplyType.
   ///
   /// Most of the parameters of the problem (except for the digraph)
@@ -64,5 +65,6 @@
   /// algorithm. By default, it is the same as \c V.
   ///
-  /// \warning Both number types must be signed and all input data must
+  /// \warning Both \c V and \c C must be signed number types.
+  /// \warning All input data (capacities, supply values, and costs) must
   /// be integer.
   ///
@@ -122,12 +124,15 @@
     /// the \ref run() function.
     ///
-    /// \ref NetworkSimplex provides five different pivot rule
-    /// implementations that significantly affect the running time
+    /// \ref NetworkSimplex provides five different implementations for
+    /// the pivot strategy that significantly affects the running time
     /// of the algorithm.
-    /// By default, \ref BLOCK_SEARCH "Block Search" is used, which
-    /// proved to be the most efficient and the most robust on various
-    /// test inputs.
-    /// However, another pivot rule can be selected using the \ref run()
-    /// function with the proper parameter.
+    /// According to experimental tests conducted on various problem
+    /// instances, \ref BLOCK_SEARCH "Block Search" and
+    /// \ref ALTERING_LIST "Altering Candidate List" rules turned out
+    /// to be the most efficient.
+    /// Since \ref BLOCK_SEARCH "Block Search" is a simpler strategy that
+    /// seemed to be slightly more robust, it is used by default.
+    /// However, another pivot rule can easily be selected using the
+    /// \ref run() function with the proper parameter.
     enum PivotRule {
 
@@ -155,5 +160,5 @@
       /// The \e Altering \e Candidate \e List pivot rule.
       /// It is a modified version of the Candidate List method.
-      /// It keeps only the several best eligible arcs from the former
+      /// It keeps only a few of the best eligible arcs from the former
       /// candidate list and extends this list in every iteration.
       ALTERING_LIST
@@ -167,6 +172,7 @@
     typedef std::vector<Value> ValueVector;
     typedef std::vector<Cost> CostVector;
-    typedef std::vector<char> BoolVector;
-    // Note: vector<char> is used instead of vector<bool> for efficiency reasons
+    typedef std::vector<signed char> CharVector;
+    // Note: vector<signed char> is used instead of vector<ArcState> and
+    // vector<ArcDirection> for efficiency reasons
 
     // State constants for arcs
@@ -177,7 +183,9 @@
     };
 
-    typedef std::vector<signed char> StateVector;
-    // Note: vector<signed char> is used instead of vector<ArcState> for
-    // efficiency reasons
+    // Direction constants for tree arcs
+    enum ArcDirection {
+      DIR_DOWN = -1,
+      DIR_UP   =  1
+    };
 
   private:
@@ -218,13 +226,11 @@
     IntVector _succ_num;
     IntVector _last_succ;
+    CharVector _pred_dir;
+    CharVector _state;
     IntVector _dirty_revs;
-    BoolVector _forward;
-    StateVector _state;
     int _root;
 
     // Temporary data used in the current pivot iteration
     int in_arc, join, u_in, v_in, u_out, v_out;
-    int first, second, right, last;
-    int stem, par_stem, new_stem;
     Value delta;
 
@@ -251,5 +257,5 @@
       const IntVector  &_target;
       const CostVector &_cost;
-      const StateVector &_state;
+      const CharVector &_state;
       const CostVector &_pi;
       int &_in_arc;
@@ -303,5 +309,5 @@
       const IntVector  &_target;
       const CostVector &_cost;
-      const StateVector &_state;
+      const CharVector &_state;
       const CostVector &_pi;
       int &_in_arc;
@@ -342,5 +348,5 @@
       const IntVector  &_target;
       const CostVector &_cost;
-      const StateVector &_state;
+      const CharVector &_state;
       const CostVector &_pi;
       int &_in_arc;
@@ -415,5 +421,5 @@
       const IntVector  &_target;
       const CostVector &_cost;
-      const StateVector &_state;
+      const CharVector &_state;
       const CostVector &_pi;
       int &_in_arc;
@@ -518,5 +524,5 @@
       const IntVector  &_target;
       const CostVector &_cost;
-      const StateVector &_state;
+      const CharVector &_state;
       const CostVector &_pi;
       int &_in_arc;
@@ -537,5 +543,5 @@
         SortFunc(const CostVector &map) : _map(map) {}
         bool operator()(int left, int right) {
-          return _map[left] > _map[right];
+          return _map[left] < _map[right];
         }
       };
@@ -555,5 +561,5 @@
         const double BLOCK_SIZE_FACTOR = 1.0;
         const int MIN_BLOCK_SIZE = 10;
-        const double HEAD_LENGTH_FACTOR = 0.1;
+        const double HEAD_LENGTH_FACTOR = 0.01;
         const int MIN_HEAD_LENGTH = 3;
 
@@ -571,9 +577,11 @@
         // Check the current candidate list
         int e;
+        Cost c;
         for (int i = 0; i != _curr_length; ++i) {
           e = _candidates[i];
-          _cand_cost[e] = _state[e] *
-            (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
-          if (_cand_cost[e] >= 0) {
+          c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
+          if (c < 0) {
+            _cand_cost[e] = c;
+          } else {
             _candidates[i--] = _candidates[--_curr_length];
           }
@@ -585,7 +593,7 @@
 
         for (e = _next_arc; e != _search_arc_num; ++e) {
-          _cand_cost[e] = _state[e] *
-            (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
-          if (_cand_cost[e] < 0) {
+          c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
+          if (c < 0) {
+            _cand_cost[e] = c;
             _candidates[_curr_length++] = e;
           }
@@ -597,7 +605,7 @@
         }
         for (e = 0; e != _next_arc; ++e) {
-          _cand_cost[e] = _state[e] *
-            (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
-          if (_cand_cost[e] < 0) {
+          c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
+          if (c < 0) {
+            _cand_cost[e] = c;
             _candidates[_curr_length++] = e;
           }
@@ -612,14 +620,14 @@
       search_end:
 
-        // Make heap of the candidate list (approximating a partial sort)
-        make_heap( _candidates.begin(), _candidates.begin() + _curr_length,
-                   _sort_func );
-
-        // Pop the first element of the heap
+        // Perform partial sort operation on the candidate list
+        int new_length = std::min(_head_length + 1, _curr_length);
+        std::partial_sort(_candidates.begin(), _candidates.begin() + new_length,
+                          _candidates.begin() + _curr_length, _sort_func);
+
+        // Select the entering arc and remove it from the list
         _in_arc = _candidates[0];
         _next_arc = e;
-        pop_heap( _candidates.begin(), _candidates.begin() + _curr_length,
-                  _sort_func );
-        _curr_length = std::min(_head_length, _curr_length - 1);
+        _candidates[0] = _candidates[new_length - 1];
+        _curr_length = new_length - 1;
         return true;
       }
@@ -634,9 +642,10 @@
     ///
     /// \param graph The digraph the algorithm runs on.
-    /// \param arc_mixing Indicate if the arcs have to be stored in a
+    /// \param arc_mixing Indicate if the arcs will be stored in a
     /// mixed order in the internal data structure.
-    /// In special cases, it could lead to better overall performance,
-    /// but it is usually slower. Therefore it is disabled by default.
-    NetworkSimplex(const GR& graph, bool arc_mixing = false) :
+    /// In general, it leads to similar performance as using the original
+    /// arc order, but it makes the algorithm more robust and in special
+    /// cases, even significantly faster. Therefore, it is enabled by default.
+    NetworkSimplex(const GR& graph, bool arc_mixing = true) :
       _graph(graph), _node_id(graph), _arc_id(graph),
       _arc_mixing(arc_mixing),
@@ -731,4 +740,6 @@
     ///
     /// \return <tt>(*this)</tt>
+    ///
+    /// \sa supplyType()
     template<typename SupplyMap>
     NetworkSimplex& supplyMap(const SupplyMap& map) {
@@ -747,5 +758,5 @@
     ///
     /// Using this function has the same effect as using \ref supplyMap()
-    /// with such a map in which \c k is assigned to \c s, \c -k is
+    /// with a map in which \c k is assigned to \c s, \c -k is
     /// assigned to \c t and all other nodes have zero supply value.
     ///
@@ -914,5 +925,5 @@
       _parent.resize(all_node_num);
       _pred.resize(all_node_num);
-      _forward.resize(all_node_num);
+      _pred_dir.resize(all_node_num);
       _thread.resize(all_node_num);
       _rev_thread.resize(all_node_num);
@@ -928,5 +939,5 @@
       if (_arc_mixing) {
         // Store the arcs in a mixed order
-        int k = std::max(int(std::sqrt(double(_arc_num))), 10);
+        const int skip = std::max(_arc_num / _node_num, 3);
         int i = 0, j = 0;
         for (ArcIt a(_graph); a != INVALID; ++a) {
@@ -934,5 +945,5 @@
           _source[i] = _node_id[_graph.source(a)];
           _target[i] = _node_id[_graph.target(a)];
-          if ((i += k) >= _arc_num) i = ++j;
+          if ((i += skip) >= _arc_num) i = ++j;
         }
       } else {
@@ -1000,5 +1011,6 @@
     }
 
-    /// \brief Return the flow map (the primal solution).
+    /// \brief Copy the flow values (the primal solution) into the
+    /// given map.
     ///
     /// This function copies the flow value on each arc into the given
@@ -1024,5 +1036,6 @@
     }
 
-    /// \brief Return the potential map (the dual solution).
+    /// \brief Copy the potential values (the dual solution) into the
+    /// given map.
     ///
     /// This function copies the potential (dual value) of each node
@@ -1117,5 +1130,5 @@
           _state[e] = STATE_TREE;
           if (_supply[u] >= 0) {
-            _forward[u] = true;
+            _pred_dir[u] = DIR_UP;
             _pi[u] = 0;
             _source[e] = u;
@@ -1124,5 +1137,5 @@
             _cost[e] = 0;
           } else {
-            _forward[u] = false;
+            _pred_dir[u] = DIR_DOWN;
             _pi[u] = ART_COST;
             _source[e] = _root;
@@ -1144,5 +1157,5 @@
           _last_succ[u] = u;
           if (_supply[u] >= 0) {
-            _forward[u] = true;
+            _pred_dir[u] = DIR_UP;
             _pi[u] = 0;
             _pred[u] = e;
@@ -1154,5 +1167,5 @@
             _state[e] = STATE_TREE;
           } else {
-            _forward[u] = false;
+            _pred_dir[u] = DIR_DOWN;
             _pi[u] = ART_COST;
             _pred[u] = f;
@@ -1185,5 +1198,5 @@
           _last_succ[u] = u;
           if (_supply[u] <= 0) {
-            _forward[u] = false;
+            _pred_dir[u] = DIR_DOWN;
             _pi[u] = 0;
             _pred[u] = e;
@@ -1195,5 +1208,5 @@
             _state[e] = STATE_TREE;
           } else {
-            _forward[u] = true;
+            _pred_dir[u] = DIR_UP;
             _pi[u] = -ART_COST;
             _pred[u] = f;
@@ -1238,4 +1251,5 @@
       // Initialize first and second nodes according to the direction
       // of the cycle
+      int first, second;
       if (_state[in_arc] == STATE_LOWER) {
         first  = _source[in_arc];
@@ -1247,12 +1261,15 @@
       delta = _cap[in_arc];
       int result = 0;
-      Value d;
+      Value c, d;
       int e;
 
-      // Search the cycle along the path form the first node to the root
+      // Search the cycle form the first node to the join node
       for (int u = first; u != join; u = _parent[u]) {
         e = _pred[u];
-        d = _forward[u] ?
-          _flow[e] : (_cap[e] >= MAX ? INF : _cap[e] - _flow[e]);
+        d = _flow[e];
+        if (_pred_dir[u] == DIR_DOWN) {
+          c = _cap[e];
+          d = c >= MAX ? INF : c - d;
+        }
         if (d < delta) {
           delta = d;
@@ -1261,9 +1278,13 @@
         }
       }
-      // Search the cycle along the path form the second node to the root
+
+      // Search the cycle form the second node to the join node
       for (int u = second; u != join; u = _parent[u]) {
         e = _pred[u];
-        d = _forward[u] ?
-          (_cap[e] >= MAX ? INF : _cap[e] - _flow[e]) : _flow[e];
+        d = _flow[e];
+        if (_pred_dir[u] == DIR_UP) {
+          c = _cap[e];
+          d = c >= MAX ? INF : c - d;
+        }
         if (d <= delta) {
           delta = d;
@@ -1290,8 +1311,8 @@
         _flow[in_arc] += val;
         for (int u = _source[in_arc]; u != join; u = _parent[u]) {
-          _flow[_pred[u]] += _forward[u] ? -val : val;
+          _flow[_pred[u]] -= _pred_dir[u] * val;
         }
         for (int u = _target[in_arc]; u != join; u = _parent[u]) {
-          _flow[_pred[u]] += _forward[u] ? val : -val;
+          _flow[_pred[u]] += _pred_dir[u] * val;
         }
       }
@@ -1308,5 +1329,4 @@
     // Update the tree structure
     void updateTreeStructure() {
-      int u, w;
       int old_rev_thread = _rev_thread[u_out];
       int old_succ_num = _succ_num[u_out];
@@ -1314,122 +1334,127 @@
       v_out = _parent[u_out];
 
-      u = _last_succ[u_in];  // the last successor of u_in
-      right = _thread[u];    // the node after it
-
-      // Handle the case when old_rev_thread equals to v_in
-      // (it also means that join and v_out coincide)
-      if (old_rev_thread == v_in) {
-        last = _thread[_last_succ[u_out]];
+      // Check if u_in and u_out coincide
+      if (u_in == u_out) {
+        // Update _parent, _pred, _pred_dir
+        _parent[u_in] = v_in;
+        _pred[u_in] = in_arc;
+        _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN;
+
+        // Update _thread and _rev_thread
+        if (_thread[v_in] != u_out) {
+          int after = _thread[old_last_succ];
+          _thread[old_rev_thread] = after;
+          _rev_thread[after] = old_rev_thread;
+          after = _thread[v_in];
+          _thread[v_in] = u_out;
+          _rev_thread[u_out] = v_in;
+          _thread[old_last_succ] = after;
+          _rev_thread[after] = old_last_succ;
+        }
       } else {
-        last = _thread[v_in];
-      }
-
-      // Update _thread and _parent along the stem nodes (i.e. the nodes
-      // between u_in and u_out, whose parent have to be changed)
-      _thread[v_in] = stem = u_in;
-      _dirty_revs.clear();
-      _dirty_revs.push_back(v_in);
-      par_stem = v_in;
-      while (stem != u_out) {
-        // Insert the next stem node into the thread list
-        new_stem = _parent[stem];
-        _thread[u] = new_stem;
-        _dirty_revs.push_back(u);
-
-        // Remove the subtree of stem from the thread list
-        w = _rev_thread[stem];
-        _thread[w] = right;
-        _rev_thread[right] = w;
-
-        // Change the parent node and shift stem nodes
-        _parent[stem] = par_stem;
-        par_stem = stem;
-        stem = new_stem;
-
-        // Update u and right
-        u = _last_succ[stem] == _last_succ[par_stem] ?
-          _rev_thread[par_stem] : _last_succ[stem];
-        right = _thread[u];
-      }
-      _parent[u_out] = par_stem;
-      _thread[u] = last;
-      _rev_thread[last] = u;
-      _last_succ[u_out] = u;
-
-      // Remove the subtree of u_out from the thread list except for
-      // the case when old_rev_thread equals to v_in
-      // (it also means that join and v_out coincide)
-      if (old_rev_thread != v_in) {
-        _thread[old_rev_thread] = right;
-        _rev_thread[right] = old_rev_thread;
-      }
-
-      // Update _rev_thread using the new _thread values
-      for (int i = 0; i != int(_dirty_revs.size()); ++i) {
-        u = _dirty_revs[i];
-        _rev_thread[_thread[u]] = u;
-      }
-
-      // Update _pred, _forward, _last_succ and _succ_num for the
-      // stem nodes from u_out to u_in
-      int tmp_sc = 0, tmp_ls = _last_succ[u_out];
-      u = u_out;
-      while (u != u_in) {
-        w = _parent[u];
-        _pred[u] = _pred[w];
-        _forward[u] = !_forward[w];
-        tmp_sc += _succ_num[u] - _succ_num[w];
-        _succ_num[u] = tmp_sc;
-        _last_succ[w] = tmp_ls;
-        u = w;
-      }
-      _pred[u_in] = in_arc;
-      _forward[u_in] = (u_in == _source[in_arc]);
-      _succ_num[u_in] = old_succ_num;
-
-      // Set limits for updating _last_succ form v_in and v_out
-      // towards the root
-      int up_limit_in = -1;
-      int up_limit_out = -1;
-      if (_last_succ[join] == v_in) {
-        up_limit_out = join;
-      } else {
-        up_limit_in = join;
+        // Handle the case when old_rev_thread equals to v_in
+        // (it also means that join and v_out coincide)
+        int thread_continue = old_rev_thread == v_in ?
+          _thread[old_last_succ] : _thread[v_in];
+
+        // Update _thread and _parent along the stem nodes (i.e. the nodes
+        // between u_in and u_out, whose parent have to be changed)
+        int stem = u_in;              // the current stem node
+        int par_stem = v_in;          // the new parent of stem
+        int next_stem;                // the next stem node
+        int last = _last_succ[u_in];  // the last successor of stem
+        int before, after = _thread[last];
+        _thread[v_in] = u_in;
+        _dirty_revs.clear();
+        _dirty_revs.push_back(v_in);
+        while (stem != u_out) {
+          // Insert the next stem node into the thread list
+          next_stem = _parent[stem];
+          _thread[last] = next_stem;
+          _dirty_revs.push_back(last);
+
+          // Remove the subtree of stem from the thread list
+          before = _rev_thread[stem];
+          _thread[before] = after;
+          _rev_thread[after] = before;
+
+          // Change the parent node and shift stem nodes
+          _parent[stem] = par_stem;
+          par_stem = stem;
+          stem = next_stem;
+
+          // Update last and after
+          last = _last_succ[stem] == _last_succ[par_stem] ?
+            _rev_thread[par_stem] : _last_succ[stem];
+          after = _thread[last];
+        }
+        _parent[u_out] = par_stem;
+        _thread[last] = thread_continue;
+        _rev_thread[thread_continue] = last;
+        _last_succ[u_out] = last;
+
+        // Remove the subtree of u_out from the thread list except for
+        // the case when old_rev_thread equals to v_in
+        if (old_rev_thread != v_in) {
+          _thread[old_rev_thread] = after;
+          _rev_thread[after] = old_rev_thread;
+        }
+
+        // Update _rev_thread using the new _thread values
+        for (int i = 0; i != int(_dirty_revs.size()); ++i) {
+          int u = _dirty_revs[i];
+          _rev_thread[_thread[u]] = u;
+        }
+
+        // Update _pred, _pred_dir, _last_succ and _succ_num for the
+        // stem nodes from u_out to u_in
+        int tmp_sc = 0, tmp_ls = _last_succ[u_out];
+        for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) {
+          _pred[u] = _pred[p];
+          _pred_dir[u] = -_pred_dir[p];
+          tmp_sc += _succ_num[u] - _succ_num[p];
+          _succ_num[u] = tmp_sc;
+          _last_succ[p] = tmp_ls;
+        }
+        _pred[u_in] = in_arc;
+        _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN;
+        _succ_num[u_in] = old_succ_num;
       }
 
       // Update _last_succ from v_in towards the root
-      for (u = v_in; u != up_limit_in && _last_succ[u] == v_in;
-           u = _parent[u]) {
-        _last_succ[u] = _last_succ[u_out];
-      }
+      int up_limit_out = _last_succ[join] == v_in ? join : -1;
+      int last_succ_out = _last_succ[u_out];
+      for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) {
+        _last_succ[u] = last_succ_out;
+      }
+
       // Update _last_succ from v_out towards the root
       if (join != old_rev_thread && v_in != old_rev_thread) {
-        for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
+        for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
              u = _parent[u]) {
           _last_succ[u] = old_rev_thread;
         }
-      } else {
-        for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
+      }
+      else if (last_succ_out != old_last_succ) {
+        for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
              u = _parent[u]) {
-          _last_succ[u] = _last_succ[u_out];
+          _last_succ[u] = last_succ_out;
         }
       }
 
       // Update _succ_num from v_in to join
-      for (u = v_in; u != join; u = _parent[u]) {
+      for (int u = v_in; u != join; u = _parent[u]) {
         _succ_num[u] += old_succ_num;
       }
       // Update _succ_num from v_out to join
-      for (u = v_out; u != join; u = _parent[u]) {
+      for (int u = v_out; u != join; u = _parent[u]) {
         _succ_num[u] -= old_succ_num;
       }
     }
 
-    // Update potentials
+    // Update potentials in the subtree that has been moved
     void updatePotential() {
-      Cost sigma = _forward[u_in] ?
-        _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] :
-        _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]];
-      // Update potentials in the subtree, which has been moved
+      Cost sigma = _pi[v_in] - _pi[u_in] -
+                   _pred_dir[u_in] * _cost[in_arc];
       int end = _thread[_last_succ[u_in]];
       for (int u = u_in; u != end; u = _thread[u]) {
Index: lemon/path.h
===================================================================
--- lemon/path.h	(revision 998)
+++ lemon/path.h	(revision 1000)
@@ -44,5 +44,5 @@
   ///
   /// In a sense, the path can be treated as a list of arcs. The
-  /// lemon path type stores just this list. As a consequence, it
+  /// LEMON path type stores just this list. As a consequence, it
   /// cannot enumerate the nodes of the path and the source node of
   /// a zero length path is undefined.
@@ -149,5 +149,5 @@
     void clear() { head.clear(); tail.clear(); }
 
-    /// \brief The nth arc.
+    /// \brief The n-th arc.
     ///
     /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
@@ -157,5 +157,5 @@
     }
 
-    /// \brief Initialize arc iterator to point to the nth arc
+    /// \brief Initialize arc iterator to point to the n-th arc
     ///
     /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
@@ -245,5 +245,5 @@
   ///
   /// In a sense, the path can be treated as a list of arcs. The
-  /// lemon path type stores just this list. As a consequence it
+  /// LEMON path type stores just this list. As a consequence it
   /// cannot enumerate the nodes in the path and the zero length paths
   /// cannot store the source.
@@ -354,5 +354,5 @@
     void clear() { data.clear(); }
 
-    /// \brief The nth arc.
+    /// \brief The n-th arc.
     ///
     /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
@@ -361,5 +361,5 @@
     }
 
-    /// \brief  Initializes arc iterator to point to the nth arc.
+    /// \brief  Initializes arc iterator to point to the n-th arc.
     ArcIt nthIt(int n) const {
       return ArcIt(*this, n);
@@ -422,5 +422,5 @@
   ///
   /// In a sense, the path can be treated as a list of arcs. The
-  /// lemon path type stores just this list. As a consequence it
+  /// LEMON path type stores just this list. As a consequence it
   /// cannot enumerate the nodes in the path and the zero length paths
   /// cannot store the source.
@@ -544,7 +544,7 @@
     };
 
-    /// \brief The nth arc.
-    ///
-    /// This function looks for the nth arc in O(n) time.
+    /// \brief The n-th arc.
+    ///
+    /// This function looks for the n-th arc in O(n) time.
     /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
     const Arc& nth(int n) const {
@@ -556,5 +556,5 @@
     }
 
-    /// \brief Initializes arc iterator to point to the nth arc.
+    /// \brief Initializes arc iterator to point to the n-th arc.
     ArcIt nthIt(int n) const {
       Node *node = first;
@@ -775,5 +775,5 @@
   ///
   /// In a sense, the path can be treated as a list of arcs. The
-  /// lemon path type stores just this list. As a consequence it
+  /// LEMON path type stores just this list. As a consequence it
   /// cannot enumerate the nodes in the path and the source node of
   /// a zero length path is undefined.
@@ -884,5 +884,5 @@
     };
 
-    /// \brief The nth arc.
+    /// \brief The n-th arc.
     ///
     /// \pre \c n is in the <tt>[0..length() - 1]</tt> range.
@@ -891,5 +891,5 @@
     }
 
-    /// \brief The arc iterator pointing to the nth arc.
+    /// \brief The arc iterator pointing to the n-th arc.
     ArcIt nthIt(int n) const {
       return ArcIt(*this, n);
@@ -1095,5 +1095,5 @@
   ///
   /// In a sense, the path can be treated as a list of arcs. The
-  /// lemon path type stores only this list. As a consequence, it
+  /// LEMON path type stores only this list. As a consequence, it
   /// cannot enumerate the nodes in the path and the zero length paths
   /// cannot have a source node.
Index: lx_check_coin.m4
===================================================================
--- m4/lx_check_coin.m4	(revision 749)
+++ 	(revision )
@@ -1,136 +1,0 @@
-AC_DEFUN([LX_CHECK_COIN],
-[
-  AC_ARG_WITH([coin],
-AS_HELP_STRING([--with-coin@<:@=PREFIX@:>@], [search for CLP under PREFIX or under the default search paths if PREFIX is not given @<:@default@:>@])
-AS_HELP_STRING([--without-coin], [disable checking for CLP]),
-              [], [with_coin=yes])
-
-  AC_ARG_WITH([coin-includedir],
-AS_HELP_STRING([--with-coin-includedir=DIR], [search for CLP headers in DIR]),
-              [], [with_coin_includedir=no])
-
-  AC_ARG_WITH([coin-libdir],
-AS_HELP_STRING([--with-coin-libdir=DIR], [search for CLP libraries in DIR]),
-              [], [with_coin_libdir=no])
-
-  lx_clp_found=no
-  if test x"$with_coin" != x"no"; then
-    AC_MSG_CHECKING([for CLP])
-
-    if test x"$with_coin_includedir" != x"no"; then
-      CLP_CXXFLAGS="-I$with_coin_includedir"
-    elif test x"$with_coin" != x"yes"; then
-      CLP_CXXFLAGS="-I$with_coin/include"
-    fi
-
-    if test x"$with_coin_libdir" != x"no"; then
-      CLP_LDFLAGS="-L$with_coin_libdir"
-    elif test x"$with_coin" != x"yes"; then
-      CLP_LDFLAGS="-L$with_coin/lib"
-    fi
-    CLP_LIBS="-lClp -lCoinUtils -lm"
-
-    lx_save_cxxflags="$CXXFLAGS"
-    lx_save_ldflags="$LDFLAGS"
-    lx_save_libs="$LIBS"
-    CXXFLAGS="$CLP_CXXFLAGS"
-    LDFLAGS="$CLP_LDFLAGS"
-    LIBS="$CLP_LIBS"
-
-    lx_clp_test_prog='
-      #include <coin/ClpModel.hpp>
-
-      int main(int argc, char** argv)
-      {
-        ClpModel clp;
-        return 0;
-      }'
-
-    AC_LANG_PUSH(C++)
-    AC_LINK_IFELSE([$lx_clp_test_prog], [lx_clp_found=yes], [lx_clp_found=no])
-    AC_LANG_POP(C++)
-
-    CXXFLAGS="$lx_save_cxxflags"
-    LDFLAGS="$lx_save_ldflags"
-    LIBS="$lx_save_libs"
-
-    if test x"$lx_clp_found" = x"yes"; then
-      AC_DEFINE([LEMON_HAVE_CLP], [1], [Define to 1 if you have CLP.])
-      lx_lp_found=yes
-      AC_DEFINE([LEMON_HAVE_LP], [1], [Define to 1 if you have any LP solver.])
-      AC_MSG_RESULT([yes])
-    else
-      CLP_CXXFLAGS=""
-      CLP_LDFLAGS=""
-      CLP_LIBS=""
-      AC_MSG_RESULT([no])
-    fi
-  fi
-  CLP_LIBS="$CLP_LDFLAGS $CLP_LIBS"
-  AC_SUBST(CLP_CXXFLAGS)
-  AC_SUBST(CLP_LIBS)
-  AM_CONDITIONAL([HAVE_CLP], [test x"$lx_clp_found" = x"yes"])
-
-
-  lx_cbc_found=no
-  if test x"$lx_clp_found" = x"yes"; then
-    if test x"$with_coin" != x"no"; then
-      AC_MSG_CHECKING([for CBC])
-
-      if test x"$with_coin_includedir" != x"no"; then
-        CBC_CXXFLAGS="-I$with_coin_includedir"
-      elif test x"$with_coin" != x"yes"; then
-        CBC_CXXFLAGS="-I$with_coin/include"
-      fi
-
-      if test x"$with_coin_libdir" != x"no"; then
-        CBC_LDFLAGS="-L$with_coin_libdir"
-      elif test x"$with_coin" != x"yes"; then
-        CBC_LDFLAGS="-L$with_coin/lib"
-      fi
-      CBC_LIBS="-lOsi -lCbc -lCbcSolver -lClp -lOsiClp -lCoinUtils -lVol -lOsiVol -lCgl -lm -llapack -lblas"
-
-      lx_save_cxxflags="$CXXFLAGS"
-      lx_save_ldflags="$LDFLAGS"
-      lx_save_libs="$LIBS"
-      CXXFLAGS="$CBC_CXXFLAGS"
-      LDFLAGS="$CBC_LDFLAGS"
-      LIBS="$CBC_LIBS"
-
-      lx_cbc_test_prog='
-        #include <coin/CbcModel.hpp>
-
-        int main(int argc, char** argv)
-        {
-          CbcModel cbc;
-          return 0;
-        }'
-
-      AC_LANG_PUSH(C++)
-      AC_LINK_IFELSE([$lx_cbc_test_prog], [lx_cbc_found=yes], [lx_cbc_found=no])
-      AC_LANG_POP(C++)
-
-      CXXFLAGS="$lx_save_cxxflags"
-      LDFLAGS="$lx_save_ldflags"
-      LIBS="$lx_save_libs"
-
-      if test x"$lx_cbc_found" = x"yes"; then
-        AC_DEFINE([LEMON_HAVE_CBC], [1], [Define to 1 if you have CBC.])
-        lx_lp_found=yes
-        AC_DEFINE([LEMON_HAVE_LP], [1], [Define to 1 if you have any LP solver.])
-        lx_mip_found=yes
-        AC_DEFINE([LEMON_HAVE_MIP], [1], [Define to 1 if you have any MIP solver.])
-        AC_MSG_RESULT([yes])
-      else
-        CBC_CXXFLAGS=""
-        CBC_LDFLAGS=""
-        CBC_LIBS=""
-        AC_MSG_RESULT([no])
-      fi
-    fi
-  fi
-  CBC_LIBS="$CBC_LDFLAGS $CBC_LIBS"
-  AC_SUBST(CBC_CXXFLAGS)
-  AC_SUBST(CBC_LIBS)
-  AM_CONDITIONAL([HAVE_CBC], [test x"$lx_cbc_found" = x"yes"])
-])
Index: m4/lx_check_cplex.m4
===================================================================
--- m4/lx_check_cplex.m4	(revision 627)
+++ 	(revision )
@@ -1,81 +1,0 @@
-AC_DEFUN([LX_CHECK_CPLEX],
-[
-  AC_ARG_WITH([cplex],
-AS_HELP_STRING([--with-cplex@<:@=PREFIX@:>@], [search for CPLEX under PREFIX or under the default search paths if PREFIX is not given @<:@default@:>@])
-AS_HELP_STRING([--without-cplex], [disable checking for CPLEX]),
-              [], [with_cplex=yes])
-
-  AC_ARG_WITH([cplex-includedir],
-AS_HELP_STRING([--with-cplex-includedir=DIR], [search for CPLEX headers in DIR]),
-              [], [with_cplex_includedir=no])
-
-  AC_ARG_WITH([cplex-libdir],
-AS_HELP_STRING([--with-cplex-libdir=DIR], [search for CPLEX libraries in DIR]),
-              [], [with_cplex_libdir=no])
-
-  lx_cplex_found=no
-  if test x"$with_cplex" != x"no"; then
-    AC_MSG_CHECKING([for CPLEX])
-
-    if test x"$with_cplex_includedir" != x"no"; then
-      CPLEX_CFLAGS="-I$with_cplex_includedir"
-    elif test x"$with_cplex" != x"yes"; then
-      CPLEX_CFLAGS="-I$with_cplex/include"
-    elif test x"$CPLEX_INCLUDEDIR" != x; then
-      CPLEX_CFLAGS="-I$CPLEX_INCLUDEDIR"
-    fi
-
-    if test x"$with_cplex_libdir" != x"no"; then
-      CPLEX_LDFLAGS="-L$with_cplex_libdir"
-    elif test x"$with_cplex" != x"yes"; then
-      CPLEX_LDFLAGS="-L$with_cplex/lib"
-    elif test x"$CPLEX_LIBDIR" != x; then
-      CPLEX_LDFLAGS="-L$CPLEX_LIBDIR"
-    fi
-    CPLEX_LIBS="-lcplex -lm -lpthread"
-
-    lx_save_cxxflags="$CXXFLAGS"
-    lx_save_ldflags="$LDFLAGS"
-    lx_save_libs="$LIBS"
-    CXXFLAGS="$CPLEX_CFLAGS"
-    LDFLAGS="$CPLEX_LDFLAGS"
-    LIBS="$CPLEX_LIBS"
-
-    lx_cplex_test_prog='
-      extern "C" {
-      #include <ilcplex/cplex.h>
-      }
-
-      int main(int argc, char** argv)
-      {
-        CPXENVptr env = NULL;
-        return 0;
-      }'
-
-    AC_LANG_PUSH(C++)
-    AC_LINK_IFELSE([$lx_cplex_test_prog], [lx_cplex_found=yes], [lx_cplex_found=no])
-    AC_LANG_POP(C++)
-
-    CXXFLAGS="$lx_save_cxxflags"
-    LDFLAGS="$lx_save_ldflags"
-    LIBS="$lx_save_libs"
-
-    if test x"$lx_cplex_found" = x"yes"; then
-      AC_DEFINE([LEMON_HAVE_CPLEX], [1], [Define to 1 if you have CPLEX.])
-      lx_lp_found=yes
-      AC_DEFINE([LEMON_HAVE_LP], [1], [Define to 1 if you have any LP solver.])
-      lx_mip_found=yes
-      AC_DEFINE([LEMON_HAVE_MIP], [1], [Define to 1 if you have any MIP solver.])
-      AC_MSG_RESULT([yes])
-    else
-      CPLEX_CFLAGS=""
-      CPLEX_LDFLAGS=""
-      CPLEX_LIBS=""
-      AC_MSG_RESULT([no])
-    fi
-  fi
-  CPLEX_LIBS="$CPLEX_LDFLAGS $CPLEX_LIBS"
-  AC_SUBST(CPLEX_CFLAGS)
-  AC_SUBST(CPLEX_LIBS)
-  AM_CONDITIONAL([HAVE_CPLEX], [test x"$lx_cplex_found" = x"yes"])
-])
Index: m4/lx_check_glpk.m4
===================================================================
--- m4/lx_check_glpk.m4	(revision 627)
+++ 	(revision )
@@ -1,84 +1,0 @@
-AC_DEFUN([LX_CHECK_GLPK],
-[
-  AC_ARG_WITH([glpk],
-AS_HELP_STRING([--with-glpk@<:@=PREFIX@:>@], [search for GLPK under PREFIX or under the default search paths if PREFIX is not given @<:@default@:>@])
-AS_HELP_STRING([--without-glpk], [disable checking for GLPK]),
-              [], [with_glpk=yes])
-
-  AC_ARG_WITH([glpk-includedir],
-AS_HELP_STRING([--with-glpk-includedir=DIR], [search for GLPK headers in DIR]),
-              [], [with_glpk_includedir=no])
-
-  AC_ARG_WITH([glpk-libdir],
-AS_HELP_STRING([--with-glpk-libdir=DIR], [search for GLPK libraries in DIR]),
-              [], [with_glpk_libdir=no])
-
-  lx_glpk_found=no
-  if test x"$with_glpk" != x"no"; then
-    AC_MSG_CHECKING([for GLPK])
-
-    if test x"$with_glpk_includedir" != x"no"; then
-      GLPK_CFLAGS="-I$with_glpk_includedir"
-    elif test x"$with_glpk" != x"yes"; then
-      GLPK_CFLAGS="-I$with_glpk/include"
-    fi
-
-    if test x"$with_glpk_libdir" != x"no"; then
-      GLPK_LDFLAGS="-L$with_glpk_libdir"
-    elif test x"$with_glpk" != x"yes"; then
-      GLPK_LDFLAGS="-L$with_glpk/lib"
-    fi
-    GLPK_LIBS="-lglpk"
-
-    lx_save_cxxflags="$CXXFLAGS"
-    lx_save_ldflags="$LDFLAGS"
-    lx_save_libs="$LIBS"
-    CXXFLAGS="$GLPK_CFLAGS"
-    LDFLAGS="$GLPK_LDFLAGS"
-    LIBS="$GLPK_LIBS"
-
-    lx_glpk_test_prog='
-      extern "C" {
-      #include <glpk.h>
-      }
-
-      #if (GLP_MAJOR_VERSION < 4) \
-         || (GLP_MAJOR_VERSION == 4 && GLP_MINOR_VERSION < 33)
-      #error Supported GLPK versions: 4.33 or above
-      #endif
-
-      int main(int argc, char** argv)
-      {
-        LPX *lp;
-        lp = lpx_create_prob();
-        lpx_delete_prob(lp);
-        return 0;
-      }'
-
-    AC_LANG_PUSH(C++)
-    AC_LINK_IFELSE([$lx_glpk_test_prog], [lx_glpk_found=yes], [lx_glpk_found=no])
-    AC_LANG_POP(C++)
-
-    CXXFLAGS="$lx_save_cxxflags"
-    LDFLAGS="$lx_save_ldflags"
-    LIBS="$lx_save_libs"
-
-    if test x"$lx_glpk_found" = x"yes"; then
-      AC_DEFINE([LEMON_HAVE_GLPK], [1], [Define to 1 if you have GLPK.])
-      lx_lp_found=yes
-      AC_DEFINE([LEMON_HAVE_LP], [1], [Define to 1 if you have any LP solver.])
-      lx_mip_found=yes
-      AC_DEFINE([LEMON_HAVE_MIP], [1], [Define to 1 if you have any MIP solver.])
-      AC_MSG_RESULT([yes])
-    else
-      GLPK_CFLAGS=""
-      GLPK_LDFLAGS=""
-      GLPK_LIBS=""
-      AC_MSG_RESULT([no])
-    fi
-  fi
-  GLPK_LIBS="$GLPK_LDFLAGS $GLPK_LIBS"
-  AC_SUBST(GLPK_CFLAGS)
-  AC_SUBST(GLPK_LIBS)
-  AM_CONDITIONAL([HAVE_GLPK], [test x"$lx_glpk_found" = x"yes"])
-])
Index: m4/lx_check_soplex.m4
===================================================================
--- m4/lx_check_soplex.m4	(revision 627)
+++ 	(revision )
@@ -1,73 +1,0 @@
-AC_DEFUN([LX_CHECK_SOPLEX],
-[
-  AC_ARG_WITH([soplex],
-AS_HELP_STRING([--with-soplex@<:@=PREFIX@:>@], [search for SOPLEX under PREFIX or under the default search paths if PREFIX is not given @<:@default@:>@])
-AS_HELP_STRING([--without-soplex], [disable checking for SOPLEX]),
-              [], [with_soplex=yes])
-
-  AC_ARG_WITH([soplex-includedir],
-AS_HELP_STRING([--with-soplex-includedir=DIR], [search for SOPLEX headers in DIR]),
-              [], [with_soplex_includedir=no])
-
-  AC_ARG_WITH([soplex-libdir],
-AS_HELP_STRING([--with-soplex-libdir=DIR], [search for SOPLEX libraries in DIR]),
-              [], [with_soplex_libdir=no])
-
-  lx_soplex_found=no
-  if test x"$with_soplex" != x"no"; then
-    AC_MSG_CHECKING([for SOPLEX])
-
-    if test x"$with_soplex_includedir" != x"no"; then
-      SOPLEX_CXXFLAGS="-I$with_soplex_includedir"
-    elif test x"$with_soplex" != x"yes"; then
-      SOPLEX_CXXFLAGS="-I$with_soplex/src"
-    fi
-
-    if test x"$with_soplex_libdir" != x"no"; then
-      SOPLEX_LDFLAGS="-L$with_soplex_libdir"
-    elif test x"$with_soplex" != x"yes"; then
-      SOPLEX_LDFLAGS="-L$with_soplex/lib"
-    fi
-    SOPLEX_LIBS="-lsoplex -lz"
-
-    lx_save_cxxflags="$CXXFLAGS"
-    lx_save_ldflags="$LDFLAGS"
-    lx_save_libs="$LIBS"
-    CXXFLAGS="$SOPLEX_CXXFLAGS"
-    LDFLAGS="$SOPLEX_LDFLAGS"
-    LIBS="$SOPLEX_LIBS"
-
-    lx_soplex_test_prog='
-      #include <soplex.h>
-
-      int main(int argc, char** argv)
-      {
-        soplex::SoPlex soplex;
-        return 0;
-      }'
-
-    AC_LANG_PUSH(C++)
-    AC_LINK_IFELSE([$lx_soplex_test_prog], [lx_soplex_found=yes], [lx_soplex_found=no])
-    AC_LANG_POP(C++)
-
-    CXXFLAGS="$lx_save_cxxflags"
-    LDFLAGS="$lx_save_ldflags"
-    LIBS="$lx_save_libs"
-
-    if test x"$lx_soplex_found" = x"yes"; then
-      AC_DEFINE([LEMON_HAVE_SOPLEX], [1], [Define to 1 if you have SOPLEX.])
-      lx_lp_found=yes
-      AC_DEFINE([LEMON_HAVE_LP], [1], [Define to 1 if you have any LP solver.])
-      AC_MSG_RESULT([yes])
-    else
-      SOPLEX_CXXFLAGS=""
-      SOPLEX_LDFLAGS=""
-      SOPLEX_LIBS=""
-      AC_MSG_RESULT([no])
-    fi
-  fi
-  SOPLEX_LIBS="$SOPLEX_LDFLAGS $SOPLEX_LIBS"
-  AC_SUBST(SOPLEX_CXXFLAGS)
-  AC_SUBST(SOPLEX_LIBS)
-  AM_CONDITIONAL([HAVE_SOPLEX], [test x"$lx_soplex_found" = x"yes"])
-])
Index: scripts/Makefile.am
===================================================================
--- scripts/Makefile.am	(revision 793)
+++ 	(revision )
@@ -1,7 +1,0 @@
-EXTRA_DIST += \
-	scripts/bib2dox.py \
-	scripts/bootstrap.sh \
-	scripts/chg-len.py \
-	scripts/mk-release.sh \
-	scripts/unify-sources.sh \
-	scripts/valgrind-wrapper.sh
Index: scripts/bootstrap.sh
===================================================================
--- scripts/bootstrap.sh	(revision 850)
+++ 	(revision )
@@ -1,157 +1,0 @@
-#!/bin/bash
-#
-# This file is a part of LEMON, a generic C++ optimization library.
-#
-# Copyright (C) 2003-2009
-# Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
-# (Egervary Research Group on Combinatorial Optimization, EGRES).
-#
-# Permission to use, modify and distribute this software is granted
-# provided that this copyright notice appears in all copies. For
-# precise terms see the accompanying LICENSE file.
-#
-# This software is provided "AS IS" with no warranty of any kind,
-# express or implied, and with no claim as to its suitability for any
-# purpose.
-
-
-if [ ! -f ~/.lemon-bootstrap ]; then
-    echo 'Create ~/.lemon-bootstrap'.
-    cat >~/.lemon-bootstrap <<EOF
-#
-# Default settings for bootstraping the LEMON source code repository
-#
-EOF
-fi
-
-source ~/.lemon-bootstrap
-if [ -f ../../../.lemon-bootstrap ]; then source ../../../.lemon-bootstrap; fi
-if [ -f ../../.lemon-bootstrap ]; then source ../../.lemon-bootstrap; fi
-if [ -f ../.lemon-bootstrap ]; then source ../.lemon-bootstrap; fi
-if [ -f ./.lemon-bootstrap ]; then source ./.lemon-bootstrap; fi
-
-
-function augment_config() { 
-    if [ "x${!1}" == "x" ]; then
-        eval $1=$2
-        echo Add "'$1'" to '~/.lemon-bootstrap'.
-        echo >>~/.lemon-bootstrap
-        echo $3 >>~/.lemon-bootstrap
-        echo $1=$2 >>~/.lemon-bootstrap
-    fi
-}
-
-augment_config LEMON_INSTALL_PREFIX /usr/local \
-    "# LEMON installation prefix"
-
-augment_config GLPK_PREFIX /usr/local/ \
-    "# GLPK installation root prefix"
-
-augment_config COIN_OR_PREFIX /usr/local/coin-or \
-    "# COIN-OR installation root prefix (used for CLP/CBC)"
-
-augment_config SOPLEX_PREFIX /usr/local/soplex \
-    "# Soplex build prefix"
-
-
-function ask() {
-echo -n "$1 [$2]? "
-read _an
-if [ "x$_an" == "x" ]; then
-    ret="$2"
-else
-    ret=$_an
-fi
-}
-
-function yesorno() {
-    ret='rossz'
-    while [ "$ret" != "y" -a "$ret" != "n" -a "$ret" != "yes" -a "$ret" != "no" ]; do
-        ask "$1" "$2"
-    done
-    if [ "$ret" != "y" -a "$ret" != "yes" ]; then
-        return 1
-    else
-        return 0
-    fi
-}
-
-if yesorno "External build" "n"
-then
-    CONFIGURE_PATH=".."
-else
-    CONFIGURE_PATH="."
-    if yesorno "Autoreconf" "y"
-    then
-        AUTORE=yes
-    else
-        AUTORE=no
-    fi
-fi
-
-if yesorno "Optimize" "n" 
-then
-    opt_flags=' -O2'
-else
-    opt_flags=''
-fi
-
-if yesorno "Stop on warning" "y" 
-then
-    werror_flags=' -Werror'
-else
-    werror_flags=''
-fi
-
-cxx_flags="CXXFLAGS=-ggdb$opt_flags$werror_flags"
-
-if yesorno "Check with valgrind" "n" 
-then
-    valgrind_flags=' --enable-valgrind'
-else
-    valgrind_flags=''
-fi
-
-if [ -f ${GLPK_PREFIX}/include/glpk.h ]; then
-    if yesorno "Use GLPK" "y"
-    then
-        glpk_flag="--with-glpk=$GLPK_PREFIX"
-    else
-        glpk_flag="--without-glpk"
-    fi
-else
-    glpk_flag="--without-glpk"        
-fi
-
-if [ -f ${COIN_OR_PREFIX}/include/coin/config_coinutils.h ]; then
-    if yesorno "Use COIN-OR (CBC/CLP)" "n"
-    then
-        coin_flag="--with-coin=$COIN_OR_PREFIX"
-    else
-        coin_flag="--without-coin"
-    fi
-else
-    coin_flag="--without-coin"        
-fi
-
-if [ -f ${SOPLEX_PREFIX}/src/soplex.h ]; then
-    if yesorno "Use Soplex" "n"
-    then
-        soplex_flag="--with-soplex=$SOPLEX_PREFIX"
-    else
-        soplex_flag="--without-soplex"
-    fi
-else
-    soplex_flag="--without-soplex"
-fi
-
-if [ "x$AUTORE" == "xyes" ]; then
-    autoreconf -vif;
-fi
-${CONFIGURE_PATH}/configure --prefix=$LEMON_INSTALL_PREFIX \
-$valgrind_flags \
-"$cxx_flags" \
-$glpk_flag \
-$coin_flag \
-$soplex_flag \
-$*
Index: scripts/chg-len.py
===================================================================
--- scripts/chg-len.py	(revision 733)
+++ 	(revision )
@@ -1,46 +1,0 @@
-#! /usr/bin/env python
-#
-# This file is a part of LEMON, a generic C++ optimization library.
-#
-# Copyright (C) 2003-2009
-# Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
-# (Egervary Research Group on Combinatorial Optimization, EGRES).
-#
-# Permission to use, modify and distribute this software is granted
-# provided that this copyright notice appears in all copies. For
-# precise terms see the accompanying LICENSE file.
-#
-# This software is provided "AS IS" with no warranty of any kind,
-# express or implied, and with no claim as to its suitability for any
-# purpose.
-
-import sys
-
-from mercurial import ui, hg
-from mercurial import util
-
-util.rcpath = lambda : []
-
-if len(sys.argv)>1 and sys.argv[1] in ["-h","--help"]:
-    print """
-This utility just prints the length of the longest path
-in the revision graph from revison 0 to the current one.
-"""
-    exit(0)
-
-u = ui.ui()
-r = hg.repository(u, ".")
-N = r.changectx(".").rev()
-lengths=[0]*(N+1)
-for i in range(N+1):
-    p=r.changectx(i).parents()
-    if p[0]:
-        p0=lengths[p[0].rev()]
-    else:
-        p0=-1
-    if len(p)>1 and p[1]:
-        p1=lengths[p[1].rev()]
-    else:
-        p1=-1
-    lengths[i]=max(p0,p1)+1
-print lengths[N]
Index: scripts/mk-release.sh
===================================================================
--- scripts/mk-release.sh	(revision 733)
+++ 	(revision )
@@ -1,49 +1,0 @@
-#!/bin/bash
-#
-# This file is a part of LEMON, a generic C++ optimization library.
-#
-# Copyright (C) 2003-2009
-# Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
-# (Egervary Research Group on Combinatorial Optimization, EGRES).
-#
-# Permission to use, modify and distribute this software is granted
-# provided that this copyright notice appears in all copies. For
-# precise terms see the accompanying LICENSE file.
-#
-# This software is provided "AS IS" with no warranty of any kind,
-# express or implied, and with no claim as to its suitability for any
-# purpose.
-
-set -e
-
-if [ $# = 0 ]; then
-    echo "Usage: $0 release-id"
-    exit 1
-else
-    export LEMON_VERSION=$1
-fi
-
-echo '*****************************************************************'
-echo ' Start making release tarballs for version '${LEMON_VERSION}
-echo '*****************************************************************'
-
-autoreconf -vif
-./configure
-
-make
-make html
-make distcheck
-tar xf lemon-${LEMON_VERSION}.tar.gz
-zip -r lemon-${LEMON_VERSION}.zip lemon-${LEMON_VERSION}
-mv lemon-${LEMON_VERSION}/doc/html lemon-doc-${LEMON_VERSION}
-tar czf lemon-doc-${LEMON_VERSION}.tar.gz lemon-doc-${LEMON_VERSION}
-zip -r lemon-doc-${LEMON_VERSION}.zip lemon-doc-${LEMON_VERSION}
-tar czf lemon-nodoc-${LEMON_VERSION}.tar.gz lemon-${LEMON_VERSION}
-zip -r lemon-nodoc-${LEMON_VERSION}.zip lemon-${LEMON_VERSION}
-hg tag -m 'LEMON '${LEMON_VERSION}' released ('$(hg par --template="{node|short}")' tagged as r'${LEMON_VERSION}')' r${LEMON_VERSION}
-
-rm -rf lemon-${LEMON_VERSION} lemon-doc-${LEMON_VERSION}
-
-echo '*****************************************************************'
-echo '  Release '${LEMON_VERSION}' has been created' 
-echo '*****************************************************************'
Index: test/CMakeLists.txt
===================================================================
--- test/CMakeLists.txt	(revision 998)
+++ test/CMakeLists.txt	(revision 1000)
@@ -38,7 +38,10 @@
   maps_test
   matching_test
+  max_cardinality_search_test
+  max_clique_test
   min_cost_arborescence_test
   min_cost_flow_test
   min_mean_cycle_test
+  nagamochi_ibaraki_test
   path_test
   planarity_test
Index: test/Makefile.am
===================================================================
--- test/Makefile.am	(revision 964)
+++ 	(revision )
@@ -1,101 +1,0 @@
-if USE_VALGRIND
-TESTS_ENVIRONMENT=$(top_srcdir)/scripts/valgrind-wrapper.sh
-endif
-
-EXTRA_DIST += \
-	test/CMakeLists.txt
-
-noinst_HEADERS += \
-	test/graph_test.h \
-	test/test_tools.h
-
-check_PROGRAMS += \
-	test/adaptors_test \
-	test/bellman_ford_test \
-	test/bfs_test \
-	test/circulation_test \
-	test/connectivity_test \
-	test/counter_test \
-	test/dfs_test \
-	test/digraph_test \
-	test/dijkstra_test \
-	test/dim_test \
-	test/edge_set_test \
-	test/error_test \
-	test/euler_test \
-	test/fractional_matching_test \
-	test/gomory_hu_test \
-	test/graph_copy_test \
-	test/graph_test \
-	test/graph_utils_test \
-	test/hao_orlin_test \
-	test/heap_test \
-	test/kruskal_test \
-	test/lgf_test \
-	test/maps_test \
-	test/matching_test \
-	test/min_cost_arborescence_test \
-	test/min_cost_flow_test \
-	test/min_mean_cycle_test \
-	test/path_test \
-	test/planarity_test \
-	test/preflow_test \
-	test/radix_sort_test \
-	test/random_test \
-	test/suurballe_test \
-	test/test_tools_fail \
-	test/test_tools_pass \
-	test/time_measure_test \
-	test/unionfind_test
-
-test_test_tools_pass_DEPENDENCIES = demo
-
-if HAVE_LP
-check_PROGRAMS += test/lp_test
-endif HAVE_LP
-if HAVE_MIP
-check_PROGRAMS += test/mip_test
-endif HAVE_MIP
-
-TESTS += $(check_PROGRAMS)
-XFAIL_TESTS += test/test_tools_fail$(EXEEXT)
-
-test_adaptors_test_SOURCES = test/adaptors_test.cc
-test_bellman_ford_test_SOURCES = test/bellman_ford_test.cc
-test_bfs_test_SOURCES = test/bfs_test.cc
-test_circulation_test_SOURCES = test/circulation_test.cc
-test_counter_test_SOURCES = test/counter_test.cc
-test_connectivity_test_SOURCES = test/connectivity_test.cc
-test_dfs_test_SOURCES = test/dfs_test.cc
-test_digraph_test_SOURCES = test/digraph_test.cc
-test_dijkstra_test_SOURCES = test/dijkstra_test.cc
-test_dim_test_SOURCES = test/dim_test.cc
-test_edge_set_test_SOURCES = test/edge_set_test.cc
-test_error_test_SOURCES = test/error_test.cc
-test_euler_test_SOURCES = test/euler_test.cc
-test_fractional_matching_test_SOURCES = test/fractional_matching_test.cc
-test_gomory_hu_test_SOURCES = test/gomory_hu_test.cc
-test_graph_copy_test_SOURCES = test/graph_copy_test.cc
-test_graph_test_SOURCES = test/graph_test.cc
-test_graph_utils_test_SOURCES = test/graph_utils_test.cc
-test_heap_test_SOURCES = test/heap_test.cc
-test_kruskal_test_SOURCES = test/kruskal_test.cc
-test_hao_orlin_test_SOURCES = test/hao_orlin_test.cc
-test_lgf_test_SOURCES = test/lgf_test.cc
-test_lp_test_SOURCES = test/lp_test.cc
-test_maps_test_SOURCES = test/maps_test.cc
-test_mip_test_SOURCES = test/mip_test.cc
-test_matching_test_SOURCES = test/matching_test.cc
-test_min_cost_arborescence_test_SOURCES = test/min_cost_arborescence_test.cc
-test_min_cost_flow_test_SOURCES = test/min_cost_flow_test.cc
-test_min_mean_cycle_test_SOURCES = test/min_mean_cycle_test.cc
-test_path_test_SOURCES = test/path_test.cc
-test_planarity_test_SOURCES = test/planarity_test.cc
-test_preflow_test_SOURCES = test/preflow_test.cc
-test_radix_sort_test_SOURCES = test/radix_sort_test.cc
-test_suurballe_test_SOURCES = test/suurballe_test.cc
-test_random_test_SOURCES = test/random_test.cc
-test_test_tools_fail_SOURCES = test/test_tools_fail.cc
-test_test_tools_pass_SOURCES = test/test_tools_pass.cc
-test_time_measure_test_SOURCES = test/time_measure_test.cc
-test_unionfind_test_SOURCES = test/unionfind_test.cc
Index: test/graph_copy_test.cc
===================================================================
--- test/graph_copy_test.cc	(revision 893)
+++ test/graph_copy_test.cc	(revision 894)
@@ -19,4 +19,5 @@
 #include <lemon/smart_graph.h>
 #include <lemon/list_graph.h>
+#include <lemon/static_graph.h>
 #include <lemon/lgf_reader.h>
 #include <lemon/error.h>
@@ -27,4 +28,5 @@
 using namespace lemon;
 
+template <typename GR>
 void digraph_copy_test() {
   const int nn = 10;
@@ -52,17 +54,17 @@
     }
   }
-
+  
   // Test digraph copy
-  ListDigraph to;
-  ListDigraph::NodeMap<int> tnm(to);
-  ListDigraph::ArcMap<int> tam(to);
-  ListDigraph::Node tn;
-  ListDigraph::Arc ta;
-
-  SmartDigraph::NodeMap<ListDigraph::Node> nr(from);
-  SmartDigraph::ArcMap<ListDigraph::Arc> er(from);
-
-  ListDigraph::NodeMap<SmartDigraph::Node> ncr(to);
-  ListDigraph::ArcMap<SmartDigraph::Arc> ecr(to);
+  GR to;
+  typename GR::template NodeMap<int> tnm(to);
+  typename GR::template ArcMap<int> tam(to);
+  typename GR::Node tn;
+  typename GR::Arc ta;
+
+  SmartDigraph::NodeMap<typename GR::Node> nr(from);
+  SmartDigraph::ArcMap<typename GR::Arc> er(from);
+
+  typename GR::template NodeMap<SmartDigraph::Node> ncr(to);
+  typename GR::template ArcMap<SmartDigraph::Arc> ecr(to);
 
   digraphCopy(from, to).
@@ -87,9 +89,9 @@
   }
 
-  for (ListDigraph::NodeIt it(to); it != INVALID; ++it) {
+  for (typename GR::NodeIt it(to); it != INVALID; ++it) {
     check(nr[ncr[it]] == it, "Wrong copy.");
   }
 
-  for (ListDigraph::ArcIt it(to); it != INVALID; ++it) {
+  for (typename GR::ArcIt it(to); it != INVALID; ++it) {
     check(er[ecr[it]] == it, "Wrong copy.");
   }
@@ -104,4 +106,5 @@
 }
 
+template <typename GR>
 void graph_copy_test() {
   const int nn = 10;
@@ -136,19 +139,19 @@
 
   // Test graph copy
-  ListGraph to;
-  ListGraph::NodeMap<int> tnm(to);
-  ListGraph::ArcMap<int> tam(to);
-  ListGraph::EdgeMap<int> tem(to);
-  ListGraph::Node tn;
-  ListGraph::Arc ta;
-  ListGraph::Edge te;
-
-  SmartGraph::NodeMap<ListGraph::Node> nr(from);
-  SmartGraph::ArcMap<ListGraph::Arc> ar(from);
-  SmartGraph::EdgeMap<ListGraph::Edge> er(from);
-
-  ListGraph::NodeMap<SmartGraph::Node> ncr(to);
-  ListGraph::ArcMap<SmartGraph::Arc> acr(to);
-  ListGraph::EdgeMap<SmartGraph::Edge> ecr(to);
+  GR to;
+  typename GR::template NodeMap<int> tnm(to);
+  typename GR::template ArcMap<int> tam(to);
+  typename GR::template EdgeMap<int> tem(to);
+  typename GR::Node tn;
+  typename GR::Arc ta;
+  typename GR::Edge te;
+
+  SmartGraph::NodeMap<typename GR::Node> nr(from);
+  SmartGraph::ArcMap<typename GR::Arc> ar(from);
+  SmartGraph::EdgeMap<typename GR::Edge> er(from);
+
+  typename GR::template NodeMap<SmartGraph::Node> ncr(to);
+  typename GR::template ArcMap<SmartGraph::Arc> acr(to);
+  typename GR::template EdgeMap<SmartGraph::Edge> ecr(to);
 
   graphCopy(from, to).
@@ -185,12 +188,12 @@
   }
 
-  for (ListGraph::NodeIt it(to); it != INVALID; ++it) {
+  for (typename GR::NodeIt it(to); it != INVALID; ++it) {
     check(nr[ncr[it]] == it, "Wrong copy.");
   }
 
-  for (ListGraph::ArcIt it(to); it != INVALID; ++it) {
+  for (typename GR::ArcIt it(to); it != INVALID; ++it) {
     check(ar[acr[it]] == it, "Wrong copy.");
   }
-  for (ListGraph::EdgeIt it(to); it != INVALID; ++it) {
+  for (typename GR::EdgeIt it(to); it != INVALID; ++it) {
     check(er[ecr[it]] == it, "Wrong copy.");
   }
@@ -209,6 +212,9 @@
 
 int main() {
-  digraph_copy_test();
-  graph_copy_test();
+  digraph_copy_test<SmartDigraph>();
+  digraph_copy_test<ListDigraph>();
+  digraph_copy_test<StaticDigraph>();
+  graph_copy_test<SmartGraph>();
+  graph_copy_test<ListGraph>();
 
   return 0;
Index: test/max_cardinality_search_test.cc
===================================================================
--- test/max_cardinality_search_test.cc	(revision 955)
+++ test/max_cardinality_search_test.cc	(revision 955)
@@ -0,0 +1,162 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#include <iostream>
+
+#include "test_tools.h"
+#include <lemon/smart_graph.h>
+#include <lemon/max_cardinality_search.h>
+#include <lemon/concepts/digraph.h>
+#include <lemon/concepts/maps.h>
+#include <lemon/concepts/heap.h>
+#include <lemon/lgf_reader.h>
+
+using namespace lemon;
+using namespace std;
+
+char test_lgf[] =
+  "@nodes\n"
+  "label\n"
+  "0\n"
+  "1\n"
+  "2\n"
+  "3\n"
+  "@arcs\n"
+  "    label capacity\n"
+  "0 1 0     2\n"
+  "1 0 1     2\n"
+  "2 1 2     1\n"
+  "2 3 3     3\n"
+  "3 2 4     3\n"
+  "3 1 5     5\n"
+  "@attributes\n"
+  "s 0\n"
+  "x 1\n"
+  "y 2\n"
+  "z 3\n";
+
+void checkMaxCardSearchCompile() {
+
+  typedef concepts::Digraph Digraph;
+  typedef int Value;
+  typedef Digraph::Node Node;
+  typedef Digraph::Arc Arc;
+  typedef concepts::ReadMap<Arc,Value> CapMap;
+  typedef concepts::ReadWriteMap<Node,Value> CardMap;
+  typedef concepts::ReadWriteMap<Node,bool> ProcMap;
+  typedef Digraph::NodeMap<int> HeapCrossRef;
+
+  Digraph g;
+  Node n,s;
+  CapMap cap;
+  CardMap card;
+  ProcMap proc;
+  HeapCrossRef crossref(g);
+  
+  typedef MaxCardinalitySearch<Digraph,CapMap>
+    ::SetCapacityMap<CapMap>
+    ::SetCardinalityMap<CardMap>
+    ::SetProcessedMap<ProcMap>
+    ::SetStandardHeap<BinHeap<Value,HeapCrossRef> >
+    ::Create MaxCardType;
+
+  MaxCardType maxcard(g,cap);
+  const MaxCardType& const_maxcard = maxcard;
+
+  const MaxCardType::Heap& heap_const = const_maxcard.heap();
+  MaxCardType::Heap& heap = const_cast<MaxCardType::Heap&>(heap_const);
+  maxcard.heap(heap,crossref);
+  
+  maxcard.capacityMap(cap).cardinalityMap(card).processedMap(proc);
+
+  maxcard.init();
+  maxcard.addSource(s);
+  n = maxcard.nextNode();
+   maxcard.processNextNode();
+   maxcard.start();
+   maxcard.run(s);
+   maxcard.run();
+ }
+
+ void checkWithIntMap( std::istringstream& input)
+ {
+   typedef SmartDigraph Digraph;
+   typedef Digraph::Node Node;
+   typedef Digraph::ArcMap<int> CapMap;
+
+   Digraph g;
+   Node s,x,y,z,a;
+   CapMap cap(g);
+
+   DigraphReader<Digraph>(g,input).
+     arcMap("capacity", cap).
+     node("s",s).
+     node("x",x).
+     node("y",y).
+     node("z",z).
+     run();
+
+   MaxCardinalitySearch<Digraph,CapMap> maxcard(g,cap);
+
+   maxcard.init();
+   maxcard.addSource(s);
+   maxcard.start(x);
+
+   check(maxcard.processed(s) && !maxcard.processed(x) &&
+         !maxcard.processed(y), "Wrong processed()!");
+
+   a=maxcard.nextNode();
+   check(maxcard.processNextNode()==a,
+         "Wrong nextNode() or processNextNode() return value!");
+
+   check(maxcard.processed(a), "Wrong processNextNode()!");
+
+   maxcard.start();
+   check(maxcard.cardinality(x)==2 && maxcard.cardinality(y)>=4,
+         "Wrong cardinalities!");
+ }
+
+ void checkWithConst1Map(std::istringstream &input) {
+   typedef SmartDigraph Digraph;
+   typedef Digraph::Node Node;
+
+   Digraph g;
+   Node s,x,y,z;
+
+  DigraphReader<Digraph>(g,input).
+    node("s",s).
+    node("x",x).
+    node("y",y).
+    node("z",z).
+    run();
+
+  MaxCardinalitySearch<Digraph> maxcard(g);
+  maxcard.run(s);
+  check(maxcard.cardinality(x)==1 &&
+        maxcard.cardinality(y)+maxcard.cardinality(z)==3,
+        "Wrong cardinalities!");
+}
+
+int main() {
+
+  std::istringstream input1(test_lgf);
+  checkWithIntMap(input1);
+
+  std::istringstream input2(test_lgf);
+  checkWithConst1Map(input2);
+}
Index: test/max_clique_test.cc
===================================================================
--- test/max_clique_test.cc	(revision 918)
+++ test/max_clique_test.cc	(revision 918)
@@ -0,0 +1,188 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#include <sstream>
+#include <lemon/list_graph.h>
+#include <lemon/full_graph.h>
+#include <lemon/grid_graph.h>
+#include <lemon/lgf_reader.h>
+#include <lemon/grosso_locatelli_pullan_mc.h>
+
+#include "test_tools.h"
+
+using namespace lemon;
+
+char test_lgf[] =
+  "@nodes\n"
+  "label max_clique\n"
+  "1     0\n"
+  "2     0\n"
+  "3     0\n"
+  "4     1\n"
+  "5     1\n"
+  "6     1\n"
+  "7     1\n"
+  "@edges\n"
+  "    label\n"
+  "1 2     1\n"
+  "1 3     2\n"
+  "1 4     3\n"
+  "1 6     4\n"
+  "2 3     5\n"
+  "2 5     6\n"
+  "2 7     7\n"
+  "3 4     8\n"
+  "3 5     9\n"
+  "4 5    10\n"
+  "4 6    11\n"
+  "4 7    12\n"
+  "5 6    13\n"
+  "5 7    14\n"
+  "6 7    15\n";
+
+
+// Check with general graphs
+template <typename Param>
+void checkMaxCliqueGeneral(Param rule) {
+  typedef ListGraph GR;
+  typedef GrossoLocatelliPullanMc<GR> McAlg;
+  typedef McAlg::CliqueNodeIt CliqueIt;
+
+  // Basic tests
+  {
+    GR g;
+    GR::NodeMap<bool> map(g);
+    McAlg mc(g);
+    mc.iterationLimit(50);
+    check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+    check(mc.cliqueSize() == 0, "Wrong clique size");
+    check(CliqueIt(mc) == INVALID, "Wrong CliqueNodeIt");
+
+    GR::Node u = g.addNode();
+    check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+    check(mc.cliqueSize() == 1, "Wrong clique size");
+    mc.cliqueMap(map);
+    check(map[u], "Wrong clique map");
+    CliqueIt it1(mc);
+    check(static_cast<GR::Node>(it1) == u && ++it1 == INVALID,
+          "Wrong CliqueNodeIt");
+
+    GR::Node v = g.addNode();
+    check(mc.run(rule) == McAlg::ITERATION_LIMIT, "Wrong termination cause");
+    check(mc.cliqueSize() == 1, "Wrong clique size");
+    mc.cliqueMap(map);
+    check((map[u] && !map[v]) || (map[v] && !map[u]), "Wrong clique map");
+    CliqueIt it2(mc);
+    check(it2 != INVALID && ++it2 == INVALID, "Wrong CliqueNodeIt");
+
+    g.addEdge(u, v);
+    check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+    check(mc.cliqueSize() == 2, "Wrong clique size");
+    mc.cliqueMap(map);
+    check(map[u] && map[v], "Wrong clique map");
+    CliqueIt it3(mc);
+    check(it3 != INVALID && ++it3 != INVALID && ++it3 == INVALID,
+          "Wrong CliqueNodeIt");
+  }
+
+  // Test graph
+  {
+    GR g;
+    GR::NodeMap<bool> max_clique(g);
+    GR::NodeMap<bool> map(g);
+    std::istringstream input(test_lgf);
+    graphReader(g, input)
+      .nodeMap("max_clique", max_clique)
+      .run();
+
+    McAlg mc(g);
+    mc.iterationLimit(50);
+    check(mc.run(rule) == McAlg::ITERATION_LIMIT, "Wrong termination cause");
+    check(mc.cliqueSize() == 4, "Wrong clique size");
+    mc.cliqueMap(map);
+    for (GR::NodeIt n(g); n != INVALID; ++n) {
+      check(map[n] == max_clique[n], "Wrong clique map");
+    }
+    int cnt = 0;
+    for (CliqueIt n(mc); n != INVALID; ++n) {
+      cnt++;
+      check(map[n] && max_clique[n], "Wrong CliqueNodeIt");
+    }
+    check(cnt == 4, "Wrong CliqueNodeIt");
+  }
+}
+
+// Check with full graphs
+template <typename Param>
+void checkMaxCliqueFullGraph(Param rule) {
+  typedef FullGraph GR;
+  typedef GrossoLocatelliPullanMc<FullGraph> McAlg;
+  typedef McAlg::CliqueNodeIt CliqueIt;
+
+  for (int size = 0; size <= 40; size = size * 3 + 1) {
+    GR g(size);
+    GR::NodeMap<bool> map(g);
+    McAlg mc(g);
+    check(mc.run(rule) == McAlg::SIZE_LIMIT, "Wrong termination cause");
+    check(mc.cliqueSize() == size, "Wrong clique size");
+    mc.cliqueMap(map);
+    for (GR::NodeIt n(g); n != INVALID; ++n) {
+      check(map[n], "Wrong clique map");
+    }
+    int cnt = 0;
+    for (CliqueIt n(mc); n != INVALID; ++n) cnt++;
+    check(cnt == size, "Wrong CliqueNodeIt");
+  }
+}
+
+// Check with grid graphs
+template <typename Param>
+void checkMaxCliqueGridGraph(Param rule) {
+  GridGraph g(5, 7);
+  GridGraph::NodeMap<char> map(g);
+  GrossoLocatelliPullanMc<GridGraph> mc(g);
+
+  mc.iterationLimit(100);
+  check(mc.run(rule) == mc.ITERATION_LIMIT, "Wrong termination cause");
+  check(mc.cliqueSize() == 2, "Wrong clique size");
+
+  mc.stepLimit(100);
+  check(mc.run(rule) == mc.STEP_LIMIT, "Wrong termination cause");
+  check(mc.cliqueSize() == 2, "Wrong clique size");
+
+  mc.sizeLimit(2);
+  check(mc.run(rule) == mc.SIZE_LIMIT, "Wrong termination cause");
+  check(mc.cliqueSize() == 2, "Wrong clique size");
+}
+
+
+int main() {
+  checkMaxCliqueGeneral(GrossoLocatelliPullanMc<ListGraph>::RANDOM);
+  checkMaxCliqueGeneral(GrossoLocatelliPullanMc<ListGraph>::DEGREE_BASED);
+  checkMaxCliqueGeneral(GrossoLocatelliPullanMc<ListGraph>::PENALTY_BASED);
+
+  checkMaxCliqueFullGraph(GrossoLocatelliPullanMc<FullGraph>::RANDOM);
+  checkMaxCliqueFullGraph(GrossoLocatelliPullanMc<FullGraph>::DEGREE_BASED);
+  checkMaxCliqueFullGraph(GrossoLocatelliPullanMc<FullGraph>::PENALTY_BASED);
+
+  checkMaxCliqueGridGraph(GrossoLocatelliPullanMc<GridGraph>::RANDOM);
+  checkMaxCliqueGridGraph(GrossoLocatelliPullanMc<GridGraph>::DEGREE_BASED);
+  checkMaxCliqueGridGraph(GrossoLocatelliPullanMc<GridGraph>::PENALTY_BASED);
+
+  return 0;
+}
Index: test/nagamochi_ibaraki_test.cc
===================================================================
--- test/nagamochi_ibaraki_test.cc	(revision 913)
+++ test/nagamochi_ibaraki_test.cc	(revision 913)
@@ -0,0 +1,141 @@
+/* -*- mode: C++; indent-tabs-mode: nil; -*-
+ *
+ * This file is a part of LEMON, a generic C++ optimization library.
+ *
+ * Copyright (C) 2003-2010
+ * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
+ * (Egervary Research Group on Combinatorial Optimization, EGRES).
+ *
+ * Permission to use, modify and distribute this software is granted
+ * provided that this copyright notice appears in all copies. For
+ * precise terms see the accompanying LICENSE file.
+ *
+ * This software is provided "AS IS" with no warranty of any kind,
+ * express or implied, and with no claim as to its suitability for any
+ * purpose.
+ *
+ */
+
+#include <sstream>
+
+#include <lemon/smart_graph.h>
+#include <lemon/adaptors.h>
+#include <lemon/concepts/graph.h>
+#include <lemon/concepts/maps.h>
+#include <lemon/lgf_reader.h>
+#include <lemon/nagamochi_ibaraki.h>
+
+#include "test_tools.h"
+
+using namespace lemon;
+using namespace std;
+
+const std::string lgf =
+  "@nodes\n"
+  "label\n"
+  "0\n"
+  "1\n"
+  "2\n"
+  "3\n"
+  "4\n"
+  "5\n"
+  "@edges\n"
+  "     cap1 cap2 cap3\n"
+  "0 1  1    1    1   \n"
+  "0 2  2    2    4   \n"
+  "1 2  4    4    4   \n"
+  "3 4  1    1    1   \n"
+  "3 5  2    2    4   \n"
+  "4 5  4    4    4   \n"
+  "2 3  1    6    6   \n";
+
+void checkNagamochiIbarakiCompile()
+{
+  typedef int Value;
+  typedef concepts::Graph Graph;
+
+  typedef Graph::Node Node;
+  typedef Graph::Edge Edge;
+  typedef concepts::ReadMap<Edge, Value> CapMap;
+  typedef concepts::WriteMap<Node, bool> CutMap;
+
+  Graph g;
+  Node n;
+  CapMap cap;
+  CutMap cut;
+  Value v;
+  bool b;
+
+  NagamochiIbaraki<Graph, CapMap> ni_test(g, cap);
+  const NagamochiIbaraki<Graph, CapMap>& const_ni_test = ni_test;
+
+  ni_test.init();
+  ni_test.start();
+  b = ni_test.processNextPhase();
+  ni_test.run();
+
+  v = const_ni_test.minCutValue();
+  v = const_ni_test.minCutMap(cut);
+}
+
+template <typename Graph, typename CapMap, typename CutMap>
+typename CapMap::Value
+  cutValue(const Graph& graph, const CapMap& cap, const CutMap& cut)
+{
+  typename CapMap::Value sum = 0;
+  for (typename Graph::EdgeIt e(graph); e != INVALID; ++e) {
+    if (cut[graph.u(e)] != cut[graph.v(e)]) {
+      sum += cap[e];
+    }
+  }
+  return sum;
+}
+
+int main() {
+  SmartGraph graph;
+  SmartGraph::EdgeMap<int> cap1(graph), cap2(graph), cap3(graph);
+  SmartGraph::NodeMap<bool> cut(graph);
+
+  istringstream input(lgf);
+  graphReader(graph, input)
+    .edgeMap("cap1", cap1)
+    .edgeMap("cap2", cap2)
+    .edgeMap("cap3", cap3)
+    .run();
+
+  {
+    NagamochiIbaraki<SmartGraph> ni(graph, cap1);
+    ni.run();
+    ni.minCutMap(cut);
+
+    check(ni.minCutValue() == 1, "Wrong cut value");
+    check(ni.minCutValue() == cutValue(graph, cap1, cut), "Wrong cut value");
+  }
+  {
+    NagamochiIbaraki<SmartGraph> ni(graph, cap2);
+    ni.run();
+    ni.minCutMap(cut);
+
+    check(ni.minCutValue() == 3, "Wrong cut value");
+    check(ni.minCutValue() == cutValue(graph, cap2, cut), "Wrong cut value");
+  }
+  {
+    NagamochiIbaraki<SmartGraph> ni(graph, cap3);
+    ni.run();
+    ni.minCutMap(cut);
+
+    check(ni.minCutValue() == 5, "Wrong cut value");
+    check(ni.minCutValue() == cutValue(graph, cap3, cut), "Wrong cut value");
+  }
+  {
+    NagamochiIbaraki<SmartGraph>::SetUnitCapacity::Create ni(graph);
+    ni.run();
+    ni.minCutMap(cut);
+
+    ConstMap<SmartGraph::Edge, int> cap4(1);
+    check(ni.minCutValue() == 1, "Wrong cut value");
+    check(ni.minCutValue() == cutValue(graph, cap4, cut), "Wrong cut value");
+  }
+
+  return 0;
+}
Index: tools/Makefile.am
===================================================================
--- tools/Makefile.am	(revision 674)
+++ 	(revision )
@@ -1,17 +1,0 @@
-EXTRA_DIST += \
-	tools/CMakeLists.txt
-
-if WANT_TOOLS
-
-bin_PROGRAMS += \
-	tools/dimacs-solver \
-	tools/dimacs-to-lgf \
-	tools/lgf-gen
-
-dist_bin_SCRIPTS += tools/lemon-0.x-to-1.x.sh
-
-endif WANT_TOOLS
-
-tools_dimacs_solver_SOURCES = tools/dimacs-solver.cc
-tools_dimacs_to_lgf_SOURCES = tools/dimacs-to-lgf.cc
-tools_lgf_gen_SOURCES = tools/lgf-gen.cc
