1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/doc/references.bib Sat Sep 26 10:15:49 2009 +0200
1.3 @@ -0,0 +1,341 @@
1.4 +%%%%% Defining LEMON %%%%%
1.5 +
1.6 +@misc{lemon,
1.7 + key = {LEMON},
1.8 + title = {{LEMON} -- {L}ibrary for {E}fficient {M}odeling and
1.9 + {O}ptimization in {N}etworks},
1.10 + howpublished = {\url{http://lemon.cs.elte.hu/}},
1.11 + year = 2009
1.12 +}
1.13 +
1.14 +@misc{egres,
1.15 + key = {EGRES},
1.16 + title = {{EGRES} -- {E}gerv{\'a}ry {R}esearch {G}roup on
1.17 + {C}ombinatorial {O}ptimization},
1.18 + howpublished = {\url{http://www.cs.elte.hu/egres/}},
1.19 + year = 2009
1.20 +}
1.21 +
1.22 +@misc{coinor,
1.23 + key = {COIN-OR},
1.24 + title = {{COIN-OR} -- {C}omputational {I}nfrastructure for
1.25 + {O}perations {R}esearch},
1.26 + howpublished = {\url{http://www.coin-or.org/}},
1.27 + year = 2009
1.28 +}
1.29 +
1.30 +
1.31 +%%%%% Other libraries %%%%%%
1.32 +
1.33 +@misc{boost,
1.34 + key = {Boost},
1.35 + title = {{B}oost {C++} {L}ibraries},
1.36 + howpublished = {\url{http://www.boost.org/}},
1.37 + year = 2009
1.38 +}
1.39 +
1.40 +@book{bglbook,
1.41 +  author = {Jeremy G. Siek and Lie-Quan Lee and Andrew
1.42 + Lumsdaine},
1.43 + title = {The Boost Graph Library: User Guide and Reference
1.44 + Manual},
1.45 + publisher = {Addison-Wesley},
1.46 + year = 2002
1.47 +}
1.48 +
1.49 +@misc{leda,
1.50 + key = {LEDA},
1.51 + title = {{LEDA} -- {L}ibrary of {E}fficient {D}ata {T}ypes and
1.52 + {A}lgorithms},
1.53 + howpublished = {\url{http://www.algorithmic-solutions.com/}},
1.54 + year = 2009
1.55 +}
1.56 +
1.57 +@book{ledabook,
1.58 + author = {Kurt Mehlhorn and Stefan N{\"a}her},
1.59 + title = {{LEDA}: {A} platform for combinatorial and geometric
1.60 + computing},
1.61 + isbn = {0-521-56329-1},
1.62 + publisher = {Cambridge University Press},
1.63 + address = {New York, NY, USA},
1.64 + year = 1999
1.65 +}
1.66 +
1.67 +
1.68 +%%%%% Tools that LEMON depends on %%%%%
1.69 +
1.70 +@misc{cmake,
1.71 + key = {CMake},
1.72 + title = {{CMake} -- {C}ross {P}latform {M}ake},
1.73 + howpublished = {\url{http://www.cmake.org/}},
1.74 + year = 2009
1.75 +}
1.76 +
1.77 +@misc{doxygen,
1.78 + key = {Doxygen},
1.79 + title = {{Doxygen} -- {S}ource code documentation generator
1.80 + tool},
1.81 + howpublished = {\url{http://www.doxygen.org/}},
1.82 + year = 2009
1.83 +}
1.84 +
1.85 +
1.86 +%%%%% LP/MIP libraries %%%%%
1.87 +
1.88 +@misc{glpk,
1.89 + key = {GLPK},
1.90 + title = {{GLPK} -- {GNU} {L}inear {P}rogramming {K}it},
1.91 + howpublished = {\url{http://www.gnu.org/software/glpk/}},
1.92 + year = 2009
1.93 +}
1.94 +
1.95 +@misc{clp,
1.96 + key = {Clp},
1.97 + title = {{Clp} -- {Coin-Or} {L}inear {P}rogramming},
1.98 + howpublished = {\url{http://projects.coin-or.org/Clp/}},
1.99 + year = 2009
1.100 +}
1.101 +
1.102 +@misc{cbc,
1.103 + key = {Cbc},
1.104 + title = {{Cbc} -- {Coin-Or} {B}ranch and {C}ut},
1.105 + howpublished = {\url{http://projects.coin-or.org/Cbc/}},
1.106 + year = 2009
1.107 +}
1.108 +
1.109 +@misc{cplex,
1.110 + key = {CPLEX},
1.111 + title = {{ILOG} {CPLEX}},
1.112 + howpublished = {\url{http://www.ilog.com/}},
1.113 + year = 2009
1.114 +}
1.115 +
1.116 +@misc{soplex,
1.117 + key = {SoPlex},
1.118 + title = {{SoPlex} -- {T}he {S}equential {O}bject-{O}riented
1.119 + {S}implex},
1.120 + howpublished = {\url{http://soplex.zib.de/}},
1.121 + year = 2009
1.122 +}
1.123 +
1.124 +
1.125 +%%%%% General books %%%%%
1.126 +
1.127 +@book{amo93networkflows,
1.128 + author = {Ravindra K. Ahuja and Thomas L. Magnanti and James
1.129 + B. Orlin},
1.130 + title = {Network Flows: Theory, Algorithms, and Applications},
1.131 + publisher = {Prentice-Hall, Inc.},
1.132 + year = 1993,
1.133 + month = feb,
1.134 + isbn = {978-0136175490}
1.135 +}
1.136 +
1.137 +@book{schrijver03combinatorial,
1.138 + author = {Alexander Schrijver},
1.139 + title = {Combinatorial Optimization: Polyhedra and Efficiency},
1.140 + publisher = {Springer-Verlag},
1.141 + year = 2003,
1.142 + isbn = {978-3540443896}
1.143 +}
1.144 +
1.145 +@book{clrs01algorithms,
1.146 + author = {Thomas H. Cormen and Charles E. Leiserson and Ronald
1.147 + L. Rivest and Clifford Stein},
1.148 + title = {Introduction to Algorithms},
1.149 + publisher = {The MIT Press},
1.150 + year = 2001,
1.151 + edition = {2nd}
1.152 +}
1.153 +
1.154 +@book{stroustrup00cpp,
1.155 + author = {Bjarne Stroustrup},
1.156 + title = {The C++ Programming Language},
1.157 + edition = {3rd},
1.158 + publisher = {Addison-Wesley Professional},
1.159 +  isbn = {0201700735},
1.160 +  month = feb,
1.161 + year = 2000
1.162 +}
1.163 +
1.164 +
1.165 +%%%%% Maximum flow algorithms %%%%%
1.166 +
1.167 +@inproceedings{goldberg86newapproach,
1.168 + author = {Andrew V. Goldberg and Robert E. Tarjan},
1.169 + title = {A new approach to the maximum flow problem},
1.170 + booktitle = {STOC '86: Proceedings of the Eighteenth Annual ACM
1.171 + Symposium on Theory of Computing},
1.172 + year = 1986,
1.173 + publisher = {ACM Press},
1.174 + address = {New York, NY},
1.175 + pages = {136-146}
1.176 +}
1.177 +
1.178 +@article{dinic70algorithm,
1.179 + author = {E. A. Dinic},
1.180 + title = {Algorithm for solution of a problem of maximum flow
1.181 + in a network with power estimation},
1.182 + journal = {Soviet Math. Doklady},
1.183 + year = 1970,
1.184 + volume = 11,
1.185 + pages = {1277-1280}
1.186 +}
1.187 +
1.188 +@inproceedings{goldberg08partial,
1.189 + author = {Andrew V. Goldberg},
1.190 + title = {The Partial Augment-Relabel Algorithm for the
1.191 + Maximum Flow Problem},
1.192 +  booktitle = {Proceedings of the 16th Annual European Symposium on Algorithms},
1.193 + year = 2008,
1.194 + pages = {466-477}
1.195 +}
1.196 +
1.197 +@article{sleator83dynamic,
1.198 + author = {Daniel D. Sleator and Robert E. Tarjan},
1.199 + title = {A data structure for dynamic trees},
1.200 + journal = {Journal of Computer and System Sciences},
1.201 + year = 1983,
1.202 + volume = 26,
1.203 + number = 3,
1.204 + pages = {362-391}
1.205 +}
1.206 +
1.207 +
1.208 +%%%%% Minimum mean cycle algorithms %%%%%
1.209 +
1.210 +@article{karp78characterization,
1.211 + author = {Richard M. Karp},
1.212 + title = {A characterization of the minimum cycle mean in a
1.213 + digraph},
1.214 +  journal = {Discrete Mathematics},
1.215 + year = 1978,
1.216 + volume = 23,
1.217 + pages = {309-311}
1.218 +}
1.219 +
1.220 +@article{dasdan98minmeancycle,
1.221 + author = {Ali Dasdan and Rajesh K. Gupta},
1.222 +  title = {Faster Maximum and Minimum Mean Cycle Algorithms for
1.223 + System Performance Analysis},
1.224 + journal = {IEEE Transactions on Computer-Aided Design of
1.225 + Integrated Circuits and Systems},
1.226 + year = 1998,
1.227 + volume = 17,
1.228 + number = 10,
1.229 + pages = {889-899}
1.230 +}
1.231 +
1.232 +
1.233 +%%%%% Minimum cost flow algorithms %%%%%
1.234 +
1.235 +@article{klein67primal,
1.236 + author = {Morton Klein},
1.237 + title = {A primal method for minimal cost flows with
1.238 + applications to the assignment and transportation
1.239 + problems},
1.240 + journal = {Management Science},
1.241 + year = 1967,
1.242 + volume = 14,
1.243 + pages = {205-220}
1.244 +}
1.245 +
1.246 +@inproceedings{goldberg88cyclecanceling,
1.247 + author = {Andrew V. Goldberg and Robert E. Tarjan},
1.248 + title = {Finding minimum-cost circulations by canceling
1.249 + negative cycles},
1.250 + booktitle = {STOC '88: Proceedings of the Twentieth Annual ACM
1.251 + Symposium on Theory of Computing},
1.252 + year = 1988,
1.253 + publisher = {ACM Press},
1.254 + address = {New York, NY},
1.255 + pages = {388-397}
1.256 +}
1.257 +
1.258 +@article{edmondskarp72theoretical,
1.259 + author = {Jack Edmonds and Richard M. Karp},
1.260 + title = {Theoretical improvements in algorithmic efficiency
1.261 + for network flow problems},
1.262 + journal = {Journal of the ACM},
1.263 + year = 1972,
1.264 + volume = 19,
1.265 + number = 2,
1.266 + pages = {248-264}
1.267 +}
1.268 +
1.269 +@inproceedings{goldberg87approximation,
1.270 + author = {Andrew V. Goldberg and Robert E. Tarjan},
1.271 + title = {Solving minimum-cost flow problems by successive
1.272 + approximation},
1.273 + booktitle = {STOC '87: Proceedings of the Nineteenth Annual ACM
1.274 + Symposium on Theory of Computing},
1.275 + year = 1987,
1.276 + publisher = {ACM Press},
1.277 + address = {New York, NY},
1.278 + pages = {7-18}
1.279 +}
1.280 +
1.281 +@article{goldberg90finding,
1.282 + author = {Andrew V. Goldberg and Robert E. Tarjan},
1.283 + title = {Finding Minimum-Cost Circulations by Successive
1.284 + Approximation},
1.285 + journal = {Mathematics of Operations Research},
1.286 + year = 1990,
1.287 + volume = 15,
1.288 + number = 3,
1.289 + pages = {430-466}
1.290 +}
1.291 +
1.292 +@article{goldberg97efficient,
1.293 + author = {Andrew V. Goldberg},
1.294 + title = {An Efficient Implementation of a Scaling
1.295 + Minimum-Cost Flow Algorithm},
1.296 + journal = {Journal of Algorithms},
1.297 + year = 1997,
1.298 + volume = 22,
1.299 + number = 1,
1.300 + pages = {1-29}
1.301 +}
1.302 +
1.303 +@article{bunnagel98efficient,
1.304 + author = {Ursula B{\"u}nnagel and Bernhard Korte and Jens
1.305 + Vygen},
1.306 + title = {Efficient implementation of the {G}oldberg-{T}arjan
1.307 + minimum-cost flow algorithm},
1.308 + journal = {Optimization Methods and Software},
1.309 + year = 1998,
1.310 + volume = 10,
1.311 + pages = {157-174}
1.312 +}
1.313 +
1.314 +@mastersthesis{kellyoneill91netsimplex,
1.315 + author = {Damian J. Kelly and Garrett M. O'Neill},
1.316 + title = {The Minimum Cost Flow Problem and The Network
1.317 + Simplex Method},
1.318 + school = {University College},
1.319 + address = {Dublin, Ireland},
1.320 + year = 1991,
1.321 + month = sep,
1.322 +}
1.323 +
1.324 +@techreport{lobel96networksimplex,
1.325 + author = {Andreas L{\"o}bel},
1.326 + title = {Solving large-scale real-world minimum-cost flow
1.327 + problems by a network simplex method},
1.328 +  institution = {Konrad-Zuse-Zentrum f{\"u}r Informationstechnik Berlin
1.329 + ({ZIB})},
1.330 + address = {Berlin, Germany},
1.331 + year = 1996,
1.332 + number = {SC 96-7}
1.333 +}
1.334 +
1.335 +@article{frangioni06computational,
1.336 + author = {Antonio Frangioni and Antonio Manca},
1.337 + title = {A Computational Study of Cost Reoptimization for
1.338 + Min-Cost Flow Problems},
1.339 + journal = {INFORMS Journal On Computing},
1.340 + year = 2006,
1.341 + volume = 18,
1.342 + number = 1,
1.343 + pages = {61-70}
1.344 +}
2.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
2.2 +++ b/scripts/bib2dox.py Sat Sep 26 10:15:49 2009 +0200
2.3 @@ -0,0 +1,806 @@
2.4 +#!/usr/bin/env python
2.5 +"""
2.6 + BibTeX to Doxygen converter
2.7 + Usage: python bib2dox.py bibfile.bib > bibfile.dox
2.8 +
2.9 + This code is a modified version of the BibTeX to XML converter
2.10 + by Vidar Bronken Gundersen et al. See the original copyright notices below.
2.11 +
2.12 + **********************************************************************
2.13 +
2.14 + Decoder for bibliographic data, BibTeX
2.15 + Usage: python bibtex2xml.py bibfile.bib > bibfile.xml
2.16 +
2.17 + v.8
2.18 + (c)2002-06-23 Vidar Bronken Gundersen
2.19 + http://bibtexml.sf.net/
2.20 + Reuse approved as long as this notification is kept.
2.21 + Licence: GPL.
2.22 +
2.23 + Contributions/thanks to:
2.24 + Egon Willighagen, http://sf.net/projects/jreferences/
2.25 + Richard Mahoney (for providing a test case)
2.26 +
2.27 + Edited by Sara Sprenkle to be more robust and handle more bibtex features.
2.28 + (c) 2003-01-15
2.29 +
2.30 + 1. Changed bibtex: tags to bibxml: tags.
2.31 + 2. Use xmlns:bibxml="http://bibtexml.sf.net/"
2.32 + 3. Allow spaces between @type and first {
2.33 + 4. "author" fields with multiple authors split by " and "
2.34 + are put in separate xml "bibxml:author" tags.
2.35 + 5. Option for Titles: words are capitalized
2.36 + only if first letter in title or capitalized inside braces
2.37 + 6. Removes braces from within field values
2.38 + 7. Ignores comments in bibtex file (including @comment{ or % )
2.39 + 8. Replaces some special latex tags, e.g., replaces ~ with ' '
2.40 + 9. Handles bibtex @string abbreviations
2.41 + --> includes bibtex's default abbreviations for months
2.42 + --> does concatenation of abbr # " more " and " more " # abbr
2.43 + 10. Handles @type( ... ) or @type{ ... }
2.44 + 11. The keywords field is split on , or ; and put into separate xml
2.45 + "bibxml:keywords" tags
2.46 + 12. Ignores @preamble
2.47 +
2.48 + Known Limitations
2.49 + 1. Does not transform Latex encoding like math mode and special
2.50 + latex symbols.
2.51 + 2. Does not parse author fields into first and last names.
2.52 + E.g., It does not do anything special to an author whose name is
2.53 + in the form LAST_NAME, FIRST_NAME
2.54 + In "author" tag, will show up as
2.55 + <bibxml:author>LAST_NAME, FIRST_NAME</bibxml:author>
2.56 + 3. Does not handle "crossref" fields other than to print
2.57 + <bibxml:crossref>...</bibxml:crossref>
2.58 + 4. Does not inform user of the input's format errors. You just won't
2.59 + be able to transform the file later with XSL
2.60 +
2.61 + You will have to manually edit the XML output if you need to handle
2.62 + these (and unknown) limitations.
2.63 +
2.64 +"""
2.65 +
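+# As a rough illustration of the generated markup (exact spacing may differ):
+# a @book entry such as clrs01algorithms in doc/references.bib should be
+# emitted approximately as the following Doxygen/HTML table row:
+#
+#   <tr valign="top">
+#   <td>[CLRS01]</td>
+#   <td>
+#   \anchor CLRS01
+#   Thomas H. Cormen, Charles E. Leiserson, Ronald L. Rivest, and Clifford Stein.
+#   <em>Introduction to Algorithms</em>.
+#   The MIT Press,
+#   2nd edition,
+#   2001.
+#   </td>
+#   </tr>
+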
2.66 +import string, re
2.67 +
2.68 +# set of valid name characters
2.69 +valid_name_chars = '[\w\-:]'
2.70 +
2.71 +#
2.72 +# define global regular expression variables
2.73 +#
2.74 +author_rex = re.compile('\s+and\s+')
2.75 +rembraces_rex = re.compile('[{}]')
2.76 +capitalize_rex = re.compile('({\w*})')
2.77 +
2.78 +# used by bibtexkeywords(data)
2.79 +keywords_rex = re.compile('[,;]')
2.80 +
2.81 +# used by concat_line(line)
2.82 +concatsplit_rex = re.compile('\s*#\s*')
2.83 +
2.84 +# split on {, }, or " in verify_out_of_braces
2.85 +delimiter_rex = re.compile('([{}"])',re.I)
2.86 +
2.87 +field_rex = re.compile('\s*(\w*)\s*=\s*(.*)')
2.88 +data_rex = re.compile('\s*(\w*)\s*=\s*([^,]*),?')
2.89 +
2.90 +url_rex = re.compile('\\\url\{([^}]*)\}')
2.91 +
2.92 +
2.93 +#
2.94 +# convert \url{...} commands to HTML links
2.95 +#
2.96 +def transformurls(str):
2.97 + return url_rex.sub(r'<a href="\1">\1</a>', str)
2.98 +
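+# For example, a howpublished value of '\url{http://lemon.cs.elte.hu/}' should
+# become '<a href="http://lemon.cs.elte.hu/">http://lemon.cs.elte.hu/</a>'.
+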
2.99 +#
2.100 +# return the string parameter without braces
2.101 +#
2.102 +def removebraces(str):
2.103 + return rembraces_rex.sub('', str)
2.104 +
2.105 +#
2.106 +# latex-specific replacements
2.107 +# (do this after braces were removed)
2.108 +#
2.109 +def latexreplacements(line):
2.110 + line = string.replace(line, '~', ' ')
2.111 + line = string.replace(line, '\\\'a', 'á')
2.112 + line = string.replace(line, '\\"a', 'ä')
2.113 + line = string.replace(line, '\\\'e', 'é')
2.114 + line = string.replace(line, '\\"e', 'ë')
2.115 + line = string.replace(line, '\\\'i', 'í')
2.116 + line = string.replace(line, '\\"i', 'ï')
2.117 + line = string.replace(line, '\\\'o', 'ó')
2.118 + line = string.replace(line, '\\"o', 'ö')
2.119 + line = string.replace(line, '\\\'u', 'ú')
2.120 + line = string.replace(line, '\\"u', 'ü')
2.121 + line = string.replace(line, '\\H o', 'õ')
2.122 + line = string.replace(line, '\\H u', 'ü') # ũ does not exist
2.123 + line = string.replace(line, '\\\'A', 'Á')
2.124 + line = string.replace(line, '\\"A', 'Ä')
2.125 + line = string.replace(line, '\\\'E', 'É')
2.126 + line = string.replace(line, '\\"E', 'Ë')
2.127 + line = string.replace(line, '\\\'I', 'Í')
2.128 + line = string.replace(line, '\\"I', 'Ï')
2.129 + line = string.replace(line, '\\\'O', 'Ó')
2.130 + line = string.replace(line, '\\"O', 'Ö')
2.131 + line = string.replace(line, '\\\'U', 'Ú')
2.132 + line = string.replace(line, '\\"U', 'Ü')
2.133 + line = string.replace(line, '\\H O', 'Õ')
2.134 + line = string.replace(line, '\\H U', 'Ü') # Ũ does not exist
2.135 +
2.136 + return line
2.137 +
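+# For example, after brace removal the string "Egerv\'ary" should become
+# "Egerváry" and "N\"aher" should become "Näher".
+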
2.138 +#
2.139 +# copy up to 'count' characters from a string, decoding html expressions (&xyz;)
2.140 +#
2.141 +def copychars(str, ifrom, count):
2.142 + result = ''
2.143 + i = ifrom
2.144 + c = 0
2.145 + html_spec = False
2.146 + while (i < len(str)) and (c < count):
2.147 + if str[i] == '&':
2.148 + html_spec = True;
2.149 + if i+1 < len(str):
2.150 + result += str[i+1]
2.151 + c += 1
2.152 + i += 2
2.153 + else:
2.154 + if not html_spec:
2.155 + if ((str[i] >= 'A') and (str[i] <= 'Z')) or \
2.156 + ((str[i] >= 'a') and (str[i] <= 'z')):
2.157 + result += str[i]
2.158 + c += 1
2.159 + elif str[i] == ';':
2.160 + html_spec = False;
2.161 + i += 1
2.162 +
2.163 + return result
2.164 +
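+# For example, copychars('G&ouml;del', 0, 3) should return 'God': plain letters
+# are copied and an HTML entity contributes only its first letter.
+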
2.165 +
2.166 +#
2.167 +# Handle a list of authors (separated by 'and').
2.168 +# It returns a dictionary with the following values:
2.169 +# - num: the number of authors,
2.170 +# - list: the list of the author names,
2.171 +# - text: the bibtex text (separated by commas and/or 'and')
2.172 +#  - abbrev: an abbreviation that can be used to label the
2.173 +# bibliography entries
2.174 +#
2.175 +def bibtexauthor(data):
2.176 + result = {}
2.177 + bibtex = ''
2.178 + result['list'] = author_rex.split(data)
2.179 + result['num'] = len(result['list'])
2.180 + for i, author in enumerate(result['list']):
2.181 + # general transformations
2.182 + author = latexreplacements(removebraces(author.strip()))
2.183 + # transform "Xyz, A. B." to "A. B. Xyz"
2.184 + pos = author.find(',')
2.185 + if pos != -1:
2.186 + author = author[pos+1:].strip() + ' ' + author[:pos].strip()
2.187 + result['list'][i] = author
2.188 + bibtex += author + '#'
2.189 + bibtex = bibtex[:-1]
2.190 + if result['num'] > 1:
2.191 + ix = bibtex.rfind('#')
2.192 + if result['num'] == 2:
2.193 + bibtex = bibtex[:ix] + ' and ' + bibtex[ix+1:]
2.194 + else:
2.195 + bibtex = bibtex[:ix] + ', and ' + bibtex[ix+1:]
2.196 + bibtex = bibtex.replace('#', ', ')
2.197 + result['text'] = bibtex
2.198 +
2.199 + result['abbrev'] = ''
2.200 + for author in result['list']:
2.201 + pos = author.rfind(' ') + 1
2.202 + count = 1
2.203 + if result['num'] == 1:
2.204 + count = 3
2.205 + result['abbrev'] += copychars(author, pos, count)
2.206 +
2.207 + return result
2.208 +
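+# For example, the field data 'Andrew V. Goldberg and Robert E. Tarjan' should
+# yield roughly
+#   {'num': 2,
+#    'list': ['Andrew V. Goldberg', 'Robert E. Tarjan'],
+#    'text': 'Andrew V. Goldberg and Robert E. Tarjan',
+#    'abbrev': 'GT'}
+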
2.209 +
2.210 +#
2.211 +# data = title string
2.212 +# @return the title with only its first word capitalized; later words keep
2.213 +#         their capitalization only if they are enclosed in braces
2.214 +#
2.215 +def capitalizetitle(data):
2.216 + title_list = capitalize_rex.split(data)
2.217 + title = ''
2.218 + count = 0
2.219 + for phrase in title_list:
2.220 + check = string.lstrip(phrase)
2.221 +
2.222 + # keep phrase's capitalization the same
2.223 + if check.find('{') == 0:
2.224 + title += removebraces(phrase)
2.225 + else:
2.226 + # first word --> capitalize first letter (after spaces)
2.227 + if count == 0:
2.228 + title += check.capitalize()
2.229 + else:
2.230 + title += phrase.lower()
2.231 + count = count + 1
2.232 +
2.233 + return title
2.234 +
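+# For example, a title like 'The {LEMON} Graph Library' should come out as
+# 'The LEMON graph library': only the first word and the brace-protected part
+# keep their capitalization.
+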
2.235 +
2.236 +#
2.237 +# @return the bibtex for the title
2.238 +# @param data --> title string
2.239 +# braces are removed from title
2.240 +#
2.241 +def bibtextitle(data, entrytype):
2.242 + if entrytype in ('book', 'inbook'):
2.243 + title = removebraces(data.strip())
2.244 + else:
2.245 + title = removebraces(capitalizetitle(data.strip()))
2.246 + bibtex = title
2.247 + return bibtex
2.248 +
2.249 +
2.250 +#
2.251 +# function to compare entry lists
2.252 +#
2.253 +def entry_cmp(x, y):
2.254 + return cmp(x[0], y[0])
2.255 +
2.256 +
2.257 +#
2.258 +# generate the Doxygen (HTML table) code for the transformed "filecont_source"
2.259 +#
2.260 +def bibtexdecoder(filecont_source):
2.261 + filecont = []
2.262 + file = []
2.263 +
2.264 + # want @<alphanumeric chars><spaces>{<spaces><any chars>,
2.265 + pubtype_rex = re.compile('@(\w*)\s*{\s*(.*),')
2.266 + endtype_rex = re.compile('}\s*$')
2.267 + endtag_rex = re.compile('^\s*}\s*$')
2.268 +
2.269 + bracefield_rex = re.compile('\s*(\w*)\s*=\s*(.*)')
2.270 + bracedata_rex = re.compile('\s*(\w*)\s*=\s*{(.*)},?')
2.271 +
2.272 + quotefield_rex = re.compile('\s*(\w*)\s*=\s*(.*)')
2.273 + quotedata_rex = re.compile('\s*(\w*)\s*=\s*"(.*)",?')
2.274 +
2.275 + for line in filecont_source:
2.276 + line = line[:-1]
2.277 +
2.278 + # encode character entities
2.279 +        line = string.replace(line, '&', '&amp;')
2.280 +        line = string.replace(line, '<', '&lt;')
2.281 +        line = string.replace(line, '>', '&gt;')
2.282 +
2.283 + # start entry: publication type (store for later use)
2.284 + if pubtype_rex.match(line):
2.285 + # want @<alphanumeric chars><spaces>{<spaces><any chars>,
2.286 + entrycont = {}
2.287 + entry = []
2.288 + entrytype = pubtype_rex.sub('\g<1>',line)
2.289 + entrytype = string.lower(entrytype)
2.290 + # entryid = pubtype_rex.sub('\g<2>', line)
2.291 +
2.292 + # end entry if just a }
2.293 + elif endtype_rex.match(line):
2.294 + # generate doxygen code for the entry
2.295 +
2.296 +            # entry type related formatting
2.297 + if entrytype in ('book', 'inbook'):
2.298 + entrycont['title'] = '<em>' + entrycont['title'] + '</em>'
2.299 + if not entrycont.has_key('author'):
2.300 + entrycont['author'] = entrycont['editor']
2.301 + entrycont['author']['text'] += ', editors'
2.302 + elif entrytype == 'article':
2.303 + entrycont['journal'] = '<em>' + entrycont['journal'] + '</em>'
2.304 + elif entrytype in ('inproceedings', 'incollection', 'conference'):
2.305 + entrycont['booktitle'] = '<em>' + entrycont['booktitle'] + '</em>'
2.306 + elif entrytype == 'techreport':
2.307 + if not entrycont.has_key('type'):
2.308 + entrycont['type'] = 'Technical report'
2.309 + elif entrytype == 'mastersthesis':
2.310 + entrycont['type'] = 'Master\'s thesis'
2.311 + elif entrytype == 'phdthesis':
2.312 + entrycont['type'] = 'PhD thesis'
2.313 +
2.314 +            # (latex replacements are already applied to each field value
2.315 +            #  as it is read in, so no extra conversion pass is needed here)
2.317 +
2.318 + if entrycont.has_key('pages') and (entrycont['pages'] != ''):
2.319 + entrycont['pages'] = string.replace(entrycont['pages'], '--', '-')
2.320 +
2.321 + if entrycont.has_key('author') and (entrycont['author'] != ''):
2.322 + entry.append(entrycont['author']['text'] + '.')
2.323 + if entrycont.has_key('title') and (entrycont['title'] != ''):
2.324 + entry.append(entrycont['title'] + '.')
2.325 + if entrycont.has_key('journal') and (entrycont['journal'] != ''):
2.326 + entry.append(entrycont['journal'] + ',')
2.327 + if entrycont.has_key('booktitle') and (entrycont['booktitle'] != ''):
2.328 + entry.append('In ' + entrycont['booktitle'] + ',')
2.329 + if entrycont.has_key('type') and (entrycont['type'] != ''):
2.330 + eline = entrycont['type']
2.331 + if entrycont.has_key('number') and (entrycont['number'] != ''):
2.332 + eline += ' ' + entrycont['number']
2.333 + eline += ','
2.334 + entry.append(eline)
2.335 + if entrycont.has_key('institution') and (entrycont['institution'] != ''):
2.336 + entry.append(entrycont['institution'] + ',')
2.337 + if entrycont.has_key('publisher') and (entrycont['publisher'] != ''):
2.338 + entry.append(entrycont['publisher'] + ',')
2.339 + if entrycont.has_key('school') and (entrycont['school'] != ''):
2.340 + entry.append(entrycont['school'] + ',')
2.341 + if entrycont.has_key('address') and (entrycont['address'] != ''):
2.342 + entry.append(entrycont['address'] + ',')
2.343 + if entrycont.has_key('edition') and (entrycont['edition'] != ''):
2.344 + entry.append(entrycont['edition'] + ' edition,')
2.345 + if entrycont.has_key('howpublished') and (entrycont['howpublished'] != ''):
2.346 + entry.append(entrycont['howpublished'] + ',')
2.347 + if entrycont.has_key('volume') and (entrycont['volume'] != ''):
2.348 + eline = entrycont['volume'];
2.349 + if entrycont.has_key('number') and (entrycont['number'] != ''):
2.350 + eline += '(' + entrycont['number'] + ')'
2.351 + if entrycont.has_key('pages') and (entrycont['pages'] != ''):
2.352 + eline += ':' + entrycont['pages']
2.353 + eline += ','
2.354 + entry.append(eline)
2.355 + else:
2.356 + if entrycont.has_key('pages') and (entrycont['pages'] != ''):
2.357 + entry.append('pages ' + entrycont['pages'] + ',')
2.358 + if entrycont.has_key('year') and (entrycont['year'] != ''):
2.359 + if entrycont.has_key('month') and (entrycont['month'] != ''):
2.360 + entry.append(entrycont['month'] + ' ' + entrycont['year'] + '.')
2.361 + else:
2.362 + entry.append(entrycont['year'] + '.')
2.363 + if entrycont.has_key('note') and (entrycont['note'] != ''):
2.364 + entry.append(entrycont['note'] + '.')
2.365 +
2.366 + # generate keys for sorting and for the output
2.367 + sortkey = ''
2.368 + bibkey = ''
2.369 + if entrycont.has_key('author'):
2.370 + for author in entrycont['author']['list']:
2.371 + sortkey += copychars(author, author.rfind(' ')+1, len(author))
2.372 + bibkey = entrycont['author']['abbrev']
2.373 + else:
2.374 + bibkey = 'x'
2.375 + if entrycont.has_key('year'):
2.376 + sortkey += entrycont['year']
2.377 + bibkey += entrycont['year'][-2:]
2.378 + if entrycont.has_key('title'):
2.379 + sortkey += entrycont['title']
2.380 + if entrycont.has_key('key'):
2.381 + sortkey = entrycont['key'] + sortkey
2.382 + bibkey = entrycont['key']
2.383 + entry.insert(0, sortkey)
2.384 + entry.insert(1, bibkey)
2.385 +
2.386 + # add the entry to the file contents
2.387 + filecont.append(entry)
2.388 +
2.389 + else:
2.390 + # field, publication info
2.391 + field = ''
2.392 + data = ''
2.393 +
2.394 + # field = {data} entries
2.395 + if bracedata_rex.match(line):
2.396 + field = bracefield_rex.sub('\g<1>', line)
2.397 + field = string.lower(field)
2.398 + data = bracedata_rex.sub('\g<2>', line)
2.399 +
2.400 + # field = "data" entries
2.401 + elif quotedata_rex.match(line):
2.402 + field = quotefield_rex.sub('\g<1>', line)
2.403 + field = string.lower(field)
2.404 + data = quotedata_rex.sub('\g<2>', line)
2.405 +
2.406 + # field = data entries
2.407 + elif data_rex.match(line):
2.408 + field = field_rex.sub('\g<1>', line)
2.409 + field = string.lower(field)
2.410 + data = data_rex.sub('\g<2>', line)
2.411 +
2.412 + if field in ('author', 'editor'):
2.413 + entrycont[field] = bibtexauthor(data)
2.414 + line = ''
2.415 + elif field == 'title':
2.416 + line = bibtextitle(data, entrytype)
2.417 + elif field != '':
2.418 + line = removebraces(transformurls(data.strip()))
2.419 +
2.420 + if line != '':
2.421 + line = latexreplacements(line)
2.422 + entrycont[field] = line
2.423 +
2.424 +
2.425 + # sort entries
2.426 + filecont.sort(entry_cmp)
2.427 +
2.428 + # count the bibtex keys
2.429 + keytable = {}
2.430 + counttable = {}
2.431 + for entry in filecont:
2.432 + bibkey = entry[1]
2.433 + if not keytable.has_key(bibkey):
2.434 + keytable[bibkey] = 1
2.435 + else:
2.436 + keytable[bibkey] += 1
2.437 +
2.438 + for bibkey in keytable.keys():
2.439 + counttable[bibkey] = 0
2.440 +
2.441 + # generate output
2.442 + for entry in filecont:
2.443 +        # generate the output key from the bibtex key
2.444 + bibkey = entry[1]
2.445 + if keytable[bibkey] == 1:
2.446 + outkey = bibkey
2.447 + else:
2.448 + outkey = bibkey + chr(97 + counttable[bibkey])
2.449 + counttable[bibkey] += 1
2.450 +
2.451 + # append the entry code to the output
2.452 + file.append('<tr valign="top">\n' + \
2.453 + '<td>[' + outkey + ']</td>')
2.454 + file.append('<td>')
2.455 + file.append('\\anchor ' + outkey)
2.456 + for line in entry[2:]:
2.457 + file.append(line)
2.458 + file.append('</td>\n</tr>')
2.459 + file.append('')
2.460 +
2.461 + return file
2.462 +
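+# Key generation example: an entry by 'Andrew V. Goldberg and Robert E. Tarjan'
+# from 1990 should get the key 'GT90' (initials of the last names plus the last
+# two digits of the year); an explicit 'key' field overrides this. If two
+# entries ended up with the same key, they would be disambiguated as 'GT90a',
+# 'GT90b', and so on.
+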
2.463 +
2.464 +#
2.465 +# return 1 iff abbr is in line but not inside braces or quotes
2.466 +# assumes that abbr appears only once on the line (out of braces and quotes)
2.467 +#
2.468 +def verify_out_of_braces(line, abbr):
2.469 +
2.470 + phrase_split = delimiter_rex.split(line)
2.471 +
2.472 + abbr_rex = re.compile( '\\b' + abbr + '\\b', re.I)
2.473 +
2.474 + open_brace = 0
2.475 + open_quote = 0
2.476 +
2.477 + for phrase in phrase_split:
2.478 + if phrase == "{":
2.479 + open_brace = open_brace + 1
2.480 + elif phrase == "}":
2.481 + open_brace = open_brace - 1
2.482 + elif phrase == '"':
2.483 + if open_quote == 1:
2.484 + open_quote = 0
2.485 + else:
2.486 + open_quote = 1
2.487 + elif abbr_rex.search(phrase):
2.488 + if open_brace == 0 and open_quote == 0:
2.489 + return 1
2.490 +
2.491 + return 0
2.492 +
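+# For example, verify_out_of_braces(' month = sep,', 'sep') should return 1,
+# while verify_out_of_braces(' title = {A sep B},', 'sep') should return 0,
+# since there the abbreviation occurs inside braces.
+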
2.493 +
2.494 +#
2.495 +# a line in the form phrase1 # phrase2 # ... # phrasen
2.496 +# is returned as phrase1 phrase2 ... phrasen
2.497 +# with the correct punctuation
2.498 +# Bug: Doesn't always work with multiple abbreviations plugged in
2.499 +#
2.500 +def concat_line(line):
2.501 + # only look at part after equals
2.502 + field = field_rex.sub('\g<1>',line)
2.503 + rest = field_rex.sub('\g<2>',line)
2.504 +
2.505 + concat_line = field + ' ='
2.506 +
2.507 + pound_split = concatsplit_rex.split(rest)
2.508 +
2.509 + phrase_count = 0
2.510 + length = len(pound_split)
2.511 +
2.512 + for phrase in pound_split:
2.513 + phrase = phrase.strip()
2.514 + if phrase_count != 0:
2.515 + if phrase.startswith('"') or phrase.startswith('{'):
2.516 + phrase = phrase[1:]
2.517 +        elif phrase.startswith('"'):
2.518 +            phrase = phrase.replace('"','{',1)
2.519 +
2.520 + if phrase_count != length-1:
2.521 + if phrase.endswith('"') or phrase.endswith('}'):
2.522 + phrase = phrase[:-1]
2.523 + else:
2.524 + if phrase.endswith('"'):
2.525 + phrase = phrase[:-1]
2.526 + phrase = phrase + "}"
2.527 + elif phrase.endswith('",'):
2.528 + phrase = phrase[:-2]
2.529 + phrase = phrase + "},"
2.530 +
2.531 + # if phrase did have \#, add the \# back
2.532 + if phrase.endswith('\\'):
2.533 + phrase = phrase + "#"
2.534 + concat_line = concat_line + ' ' + phrase
2.535 +
2.536 + phrase_count = phrase_count + 1
2.537 +
2.538 + return concat_line
2.539 +
2.540 +
2.541 +#
2.542 +# substitute abbreviations into filecont
2.543 +# @param filecont_source - string of data from file
2.544 +#
2.545 +def bibtex_replace_abbreviations(filecont_source):
2.546 + filecont = filecont_source.splitlines()
2.547 +
2.548 + # These are defined in bibtex, so we'll define them too
2.549 + abbr_list = ['jan','feb','mar','apr','may','jun',
2.550 + 'jul','aug','sep','oct','nov','dec']
2.551 + value_list = ['January','February','March','April',
2.552 + 'May','June','July','August','September',
2.553 + 'October','November','December']
2.554 +
2.555 + abbr_rex = []
2.556 + total_abbr_count = 0
2.557 +
2.558 + front = '\\b'
2.559 + back = '(,?)\\b'
2.560 +
2.561 + for x in abbr_list:
2.562 + abbr_rex.append( re.compile( front + abbr_list[total_abbr_count] + back, re.I ) )
2.563 + total_abbr_count = total_abbr_count + 1
2.564 +
2.565 +
2.566 + abbrdef_rex = re.compile('\s*@string\s*{\s*('+ valid_name_chars +'*)\s*=(.*)',
2.567 + re.I)
2.568 +
2.569 + comment_rex = re.compile('@comment\s*{',re.I)
2.570 + preamble_rex = re.compile('@preamble\s*{',re.I)
2.571 +
2.572 + waiting_for_end_string = 0
2.573 + i = 0
2.574 + filecont2 = ''
2.575 +
2.576 + for line in filecont:
2.577 + if line == ' ' or line == '':
2.578 + continue
2.579 +
2.580 + if waiting_for_end_string:
2.581 + if re.search('}',line):
2.582 + waiting_for_end_string = 0
2.583 + continue
2.584 +
2.585 + if abbrdef_rex.search(line):
2.586 + abbr = abbrdef_rex.sub('\g<1>', line)
2.587 +
2.588 + if abbr_list.count(abbr) == 0:
2.589 + val = abbrdef_rex.sub('\g<2>', line)
2.590 + abbr_list.append(abbr)
2.591 + value_list.append(string.strip(val))
2.592 + abbr_rex.append( re.compile( front + abbr_list[total_abbr_count] + back, re.I ) )
2.593 + total_abbr_count = total_abbr_count + 1
2.594 + waiting_for_end_string = 1
2.595 + continue
2.596 +
2.597 + if comment_rex.search(line):
2.598 + waiting_for_end_string = 1
2.599 + continue
2.600 +
2.601 + if preamble_rex.search(line):
2.602 + waiting_for_end_string = 1
2.603 + continue
2.604 +
2.605 +
2.606 + # replace subsequent abbreviations with the value
2.607 + abbr_count = 0
2.608 +
2.609 + for x in abbr_list:
2.610 +
2.611 + if abbr_rex[abbr_count].search(line):
2.612 + if verify_out_of_braces(line,abbr_list[abbr_count]) == 1:
2.613 + line = abbr_rex[abbr_count].sub( value_list[abbr_count] + '\g<1>', line)
2.614 + # Check for # concatenations
2.615 + if concatsplit_rex.search(line):
2.616 + line = concat_line(line)
2.617 + abbr_count = abbr_count + 1
2.618 +
2.619 +
2.620 + filecont2 = filecont2 + line + '\n'
2.621 + i = i+1
2.622 +
2.623 +
2.624 + # Do one final pass over file
2.625 +
2.626 + # make sure that didn't end up with {" or }" after the substitution
2.627 + filecont2 = filecont2.replace('{"','{{')
2.628 + filecont2 = filecont2.replace('"}','}}')
2.629 +
2.630 + afterquotevalue_rex = re.compile('"\s*,\s*')
2.631 + afterbrace_rex = re.compile('"\s*}')
2.632 + afterbracevalue_rex = re.compile('(=\s*{[^=]*)},\s*')
2.633 +
2.634 + # add new lines to data that changed because of abbreviation substitutions
2.635 + filecont2 = afterquotevalue_rex.sub('",\n', filecont2)
2.636 + filecont2 = afterbrace_rex.sub('"\n}', filecont2)
2.637 + filecont2 = afterbracevalue_rex.sub('\g<1>},\n', filecont2)
2.638 +
2.639 + return filecont2
2.640 +
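+# For example, a line such as 'month = sep,' should come out as
+# 'month = September,'; @string definitions found in the input are added to
+# the same substitution table as these built-in month abbreviations.
+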
2.641 +#
2.642 +# convert @type( ... ) to @type{ ... }
2.643 +#
2.644 +def no_outer_parens(filecont):
2.645 +
2.646 + # do checking for open parens
2.647 + # will convert to braces
2.648 + paren_split = re.split('([(){}])',filecont)
2.649 +
2.650 + open_paren_count = 0
2.651 + open_type = 0
2.652 + look_next = 0
2.653 +
2.654 + # rebuild filecont
2.655 + filecont = ''
2.656 +
2.657 + at_rex = re.compile('@\w*')
2.658 +
2.659 + for phrase in paren_split:
2.660 + if look_next == 1:
2.661 + if phrase == '(':
2.662 + phrase = '{'
2.663 + open_paren_count = open_paren_count + 1
2.664 + else:
2.665 + open_type = 0
2.666 + look_next = 0
2.667 +
2.668 + if phrase == '(':
2.669 + open_paren_count = open_paren_count + 1
2.670 +
2.671 + elif phrase == ')':
2.672 + open_paren_count = open_paren_count - 1
2.673 + if open_type == 1 and open_paren_count == 0:
2.674 + phrase = '}'
2.675 + open_type = 0
2.676 +
2.677 + elif at_rex.search( phrase ):
2.678 + open_type = 1
2.679 + look_next = 1
2.680 +
2.681 + filecont = filecont + phrase
2.682 +
2.683 + return filecont
2.684 +
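+# For example, '@misc(lemon, ...)' should be rewritten as '@misc{lemon, ...}';
+# parentheses that do not delimit an entry are left unchanged.
+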
2.685 +
2.686 +#
2.687 +# make all whitespace into just one space
2.688 +# format the bibtex file into a usable form.
2.689 +#
2.690 +def bibtexwasher(filecont_source):
2.691 +
2.692 + space_rex = re.compile('\s+')
2.693 + comment_rex = re.compile('\s*%')
2.694 +
2.695 + filecont = []
2.696 +
2.697 + # remove trailing and excessive whitespace
2.698 + # ignore comments
2.699 + for line in filecont_source:
2.700 + line = string.strip(line)
2.701 + line = space_rex.sub(' ', line)
2.702 + # ignore comments
2.703 + if not comment_rex.match(line) and line != '':
2.704 + filecont.append(' '+ line)
2.705 +
2.706 + filecont = string.join(filecont, '')
2.707 +
2.708 + # the file is in one long string
2.709 +
2.710 + filecont = no_outer_parens(filecont)
2.711 +
2.712 + #
2.713 + # split lines according to preferred syntax scheme
2.714 + #
2.715 + filecont = re.sub('(=\s*{[^=]*)},', '\g<1>},\n', filecont)
2.716 +
2.717 + # add new lines after commas that are after values
2.718 + filecont = re.sub('"\s*,', '",\n', filecont)
2.719 + filecont = re.sub('=\s*([\w\d]+)\s*,', '= \g<1>,\n', filecont)
2.720 + filecont = re.sub('(@\w*)\s*({(\s*)[^,\s]*)\s*,',
2.721 + '\n\n\g<1>\g<2>,\n', filecont)
2.722 +
2.723 + # add new lines after }
2.724 + filecont = re.sub('"\s*}','"\n}\n', filecont)
2.725 + filecont = re.sub('}\s*,','},\n', filecont)
2.726 +
2.727 +
2.728 + filecont = re.sub('@(\w*)', '\n@\g<1>', filecont)
2.729 +
2.730 + # character encoding, reserved latex characters
2.731 + filecont = re.sub('{\\\&}', '&', filecont)
2.732 + filecont = re.sub('\\\&', '&', filecont)
2.733 +
2.734 + # do checking for open braces to get format correct
2.735 + open_brace_count = 0
2.736 + brace_split = re.split('([{}])',filecont)
2.737 +
2.738 + # rebuild filecont
2.739 + filecont = ''
2.740 +
2.741 + for phrase in brace_split:
2.742 + if phrase == '{':
2.743 + open_brace_count = open_brace_count + 1
2.744 + elif phrase == '}':
2.745 + open_brace_count = open_brace_count - 1
2.746 + if open_brace_count == 0:
2.747 + filecont = filecont + '\n'
2.748 +
2.749 + filecont = filecont + phrase
2.750 +
2.751 + filecont2 = bibtex_replace_abbreviations(filecont)
2.752 +
2.753 + # gather
2.754 + filecont = filecont2.splitlines()
2.755 + i=0
2.756 + j=0 # count the number of blank lines
2.757 + for line in filecont:
2.758 + # ignore blank lines
2.759 + if line == '' or line == ' ':
2.760 + j = j+1
2.761 + continue
2.762 + filecont[i] = line + '\n'
2.763 + i = i+1
2.764 +
2.765 + # get rid of the extra stuff at the end of the array
2.766 + # (The extra stuff are duplicates that are in the array because
2.767 + # blank lines were removed.)
2.768 + length = len( filecont)
2.769 + filecont[length-j:length] = []
2.770 +
2.771 + return filecont
2.772 +
2.773 +
2.774 +def filehandler(filepath):
2.775 + try:
2.776 + fd = open(filepath, 'r')
2.777 + filecont_source = fd.readlines()
2.778 + fd.close()
2.779 +    except IOError:
2.780 +        print 'Could not open file:', filepath
+        return
2.781 + washeddata = bibtexwasher(filecont_source)
2.782 + outdata = bibtexdecoder(washeddata)
2.783 + print '/**'
2.784 + print '\page references References'
2.785 + print
2.786 + print '<table border="0" cellspacing="5px" width="100%">'
2.787 + print
2.788 + for line in outdata:
2.789 + print line
2.790 + print '</table>'
2.791 + print
2.792 + print '*/'
2.793 +
2.794 +
2.795 +# main program
2.796 +
2.797 +def main():
2.798 + import sys
2.799 + if sys.argv[1:]:
2.800 + filepath = sys.argv[1]
2.801 + else:
2.802 + print "No input file"
2.803 + sys.exit()
2.804 + filehandler(filepath)
2.805 +
2.806 +if __name__ == "__main__": main()
2.807 +
2.808 +
2.809 +# end python script