commit 34ba7f535e474b2c18465d31d329d629ba347860
parent 29ae28f04e773bba12a3e7f9cd4e16e4fe8f619e
Author: Nils Gillmann <ng0@n0.is>
Date: Sun, 7 Oct 2018 12:29:47 +0000
lint.
Signed-off-by: Nils Gillmann <ng0@n0.is>
Diffstat:
M  BibTeX.py | 250 ++++++++++++++++++++++++++++++++++++-----------------------------------------
1 file changed, 121 insertions(+), 129 deletions(-)
diff --git a/BibTeX.py b/BibTeX.py
@@ -17,29 +17,29 @@ import config
import rank
-__all__ = [ 'ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize',
- 'ParsedAuthor', 'FileIter', 'Parser', 'parseFile',
- 'splitBibTeXEntriesBy', 'sortBibTexEntriesBy', ]
+__all__ = ['ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize',
+ 'ParsedAuthor', 'FileIter', 'Parser', 'parseFile',
+ 'splitBibTeXEntriesBy', 'sortBibTexEntriesBy',]
# List: must map from month number to month name.
-MONTHS = [ None,
- "January", "February", "March", "April", "May", "June",
- "July", "August", "September", "October", "November", "December"]
+MONTHS = [None,
+ "January", "February", "March", "April", "May", "June",
+ "July", "August", "September", "October", "November", "December"]
# Fields that we only care about for making web pages (BibTeX doesn't
# recognize them.)
-WWW_FIELDS = [ 'www_section', 'www_important', 'www_remarks',
- 'www_abstract_url', 'www_html_url', 'www_pdf_url', 'www_ps_url',
- 'www_txt_url', 'www_ps_gz_url', 'www_amazon_url',
- 'www_excerpt_url', 'www_publisher_url',
- 'www_cache_section', 'www_tags' ]
+WWW_FIELDS = ['www_section', 'www_important', 'www_remarks',
+ 'www_abstract_url', 'www_html_url', 'www_pdf_url',
+ 'www_ps_url', 'www_txt_url', 'www_ps_gz_url',
+ 'www_amazon_url', 'www_excerpt_url', 'www_publisher_url',
+ 'www_cache_section', 'www_tags']
def url_untranslate(s):
"""Change a BibTeX key into a string suitable for use in a URL."""
s = re.sub(r'([%<>`#, &_\';])',
lambda m: "_%02x"%ord(m.group(1)),
s)
- s = s.replace("/",":")
+ s = s.replace("/", ":")
return s
class ParseError(Exception):
@@ -50,7 +50,7 @@ class ParseError(Exception):
def smartJoin(*lst):
"""Equivalent to os.path.join, but handle"." and ".." entries a bit better.
"""
- lst = [ item for item in lst if item != "." ]
+ lst = [item for item in lst if item != "."]
idx = 0
while idx < len(lst):
if idx > 0 and lst[idx] == "..":
@@ -94,7 +94,7 @@ class BibTeX:
for k in cr.entries.keys():
if ent.entries.has_key(k):
print "ERROR: %s defined both in %s and in %s"%(
- k,ent.key,cr.key)
+ k, ent.key, cr.key)
else:
ent.entries[k] = cr.entries[k]
@@ -124,7 +124,7 @@ def buildAuthorTable(entries):
authorsByLast.setdefault(tuple(a.last), []).append(a)
# map from author to collapsed author.
result = {}
- for k,v in config.COLLAPSE_AUTHORS.items():
+ for k, v in config.COLLAPSE_AUTHORS.items():
a = parseAuthor(k)[0]
c = parseAuthor(v)[0]
result[c] = c
@@ -143,9 +143,9 @@ def buildAuthorTable(entries):
result[author] = c
if 0:
- for a,c in result.items():
+ for a, c in result.items():
if a != c:
- print "Collapsing authors: %s => %s" % (a,c)
+ print "Collapsing authors: %s => %s" % (a, c)
if 0:
print parseAuthor("Franz Kaashoek")[0].collapsesTo(
parseAuthor("M. Franz Kaashoek")[0])
@@ -209,7 +209,7 @@ def sortEntriesBy(entries, field, default):
tmp.append((txtize(v_j.strip()), i, ent_j))
else: tmp.append((txtize(v), i, ent))
tmp.sort()
- return [ t[2] for t in tmp ]
+ return [t[2] for t in tmp]
def splitEntriesByAuthor(entries):
"""Take a list of entries, sort them by author names, and return:
@@ -235,7 +235,7 @@ def splitEntriesByAuthor(entries):
result.setdefault(sortkey, []).append(ent)
sortnames = result.keys()
sortnames.sort()
- sections = [ (htmlResult[n], result[n]) for n in sortnames ]
+ sections = [(htmlResult[n], result[n]) for n in sortnames]
return sections, url_map
## def sortEntriesByAuthor(entries):
@@ -255,8 +255,7 @@ def sortEntriesByDate(entries):
i = 0
for ent in entries:
i += 1
- if (ent.get('month') == "forthcoming" or
- ent.get('year') == "forthcoming"):
+ if (ent.get('month') == "forthcoming" or ent.get('year') == "forthcoming"):
tmp.append((20000*13, i, ent))
continue
try:
@@ -279,14 +278,16 @@ def sortEntriesByDate(entries):
date = 10000*13
tmp.append((date, i, ent))
tmp.sort()
- return [ t[2] for t in tmp ]
+ return [t[2] for t in tmp]
# List of fields that appear when we display the entries as BibTeX.
-DISPLAYED_FIELDS = [ 'title', 'author', 'journal', 'booktitle',
-'school', 'institution', 'organization', 'volume', 'number', 'year',
-'month', 'address', 'location', 'chapter', 'edition', 'pages', 'editor',
-'howpublished', 'key', 'publisher', 'type', 'note', 'series' ]
+DISPLAYED_FIELDS = ['title', 'author', 'journal', 'booktitle',
+ 'school', 'institution', 'organization', 'volume',
+ 'number', 'year', 'month', 'address', 'location',
+ 'chapter', 'edition', 'pages', 'editor',
+ 'howpublished', 'key', 'publisher', 'type',
+ 'note', 'series']
class BibTeXEntry:
"""A single BibTeX entry."""
@@ -296,7 +297,7 @@ class BibTeXEntry:
self.entries = entries # Map from key to value.
self.entryLine = 0 # Defined on this line number
def get(self, k, v=None):
- return self.entries.get(k,v)
+ return self.entries.get(k, v)
def has_key(self, k):
return self.entries.has_key(k)
def __getitem__(self, k):
@@ -304,7 +305,7 @@ class BibTeXEntry:
def __setitem__(self, k, v):
self.entries[k] = v
def __str__(self):
- return self.format(70,1)
+ return self.format(70, 1)
def getURL(self):
"""Return the best URL to use for this paper, or None."""
best = None
@@ -346,7 +347,7 @@ class BibTeXEntry:
s = "%s = %s,\n" %(f, invStrings[v])
else:
s = "%s = {%s},\n" % (f, v)
- d.append(_split(s,width,indent))
+ d.append(_split(s, width, indent))
d.append("}\n")
return "".join(d)
def resolve(self):
@@ -382,23 +383,23 @@ class BibTeXEntry:
def _check(self):
errs = []
if self.type == 'inproceedings':
- fields = 'booktitle', 'year',
+ fields = 'booktitle', 'year'
elif self.type == 'incollection':
- fields = 'booktitle', 'year',
+ fields = 'booktitle', 'year'
elif self.type == 'proceedings':
- fields = 'booktitle', 'editor',
+ fields = 'booktitle', 'editor'
elif self.type == 'article':
- fields = 'journal', 'year',
+ fields = 'journal', 'year'
elif self.type == 'book':
fields = 'title', 'year'
- elif self.type == 'booklet':
- fields = (),
+ # elif self.type == 'booklet':
+ # fields = (),
elif self.type == 'techreport':
fields = 'institution',
elif self.type == 'misc':
fields = 'howpublished',
- elif self.type == 'conference':
- fields = 'booktitle', 'year',
+ # elif self.type == 'conference':
+ # fields = 'booktitle', 'year',
elif self.type in ('mastersthesis', 'phdthesis'):
fields = ()
else:
@@ -434,8 +435,7 @@ class BibTeXEntry:
errs.append("ERROR: unknown www field %s"% field)
if value.strip()[-1:] == '.' and \
field not in ("notes", "www_remarks", "author"):
- errs.append("ERROR: %s.%s has an extraneous period"%(self.key,
- field))
+ errs.append("ERROR: %s.%s has an extraneous period"%(self.key, field))
return errs
def biblio_to_html(self):
@@ -449,9 +449,9 @@ class BibTeXEntry:
res = ["In the ", m.group(1),
'<a href="%s">'%bookurl, m.group(2), "</a>"]
else:
- res = ['In the <a href="%s">%s</a>' % (bookurl,booktitle)]
+ res = ['In the <a href="%s">%s</a>' % (bookurl, booktitle)]
else:
- res = ["In the ", booktitle ]
+ res = ["In the ", booktitle]
if self.get("edition"):
res.append(",")
@@ -462,7 +462,7 @@ class BibTeXEntry:
elif self.get("address"):
res.append(", ")
res.append(self['address'])
- res.append(", %s %s" % (self.get('month',""), self['year']))
+ res.append(", %s %s" % (self.get('month', ""), self['year']))
if not self.get('pages'):
pass
elif "-" in self['pages']:
@@ -473,14 +473,14 @@ class BibTeXEntry:
res = ["In "]
if self.get('journalurl'):
res.append('<a href="%s">%s</a>'%
- (self['journalurl'],self['journal']))
+ (self['journalurl'], self['journal']))
else:
res.append(self['journal'])
if self.get('volume'):
res.append(" <b>%s</b>"%self['volume'])
if self.get('number'):
res.append("(%s)"%self['number'])
- res.append(", %s %s" % (self.get('month',""), self['year']))
+ res.append(", %s %s" % (self.get('month', ""), self['year']))
if not self.get('pages'):
pass
elif "-" in self['pages']:
@@ -488,9 +488,9 @@ class BibTeXEntry:
else:
res.append(", page %s"%self['pages'])
elif self.type == 'techreport':
- res = [ "%s %s %s" % (self['institution'],
- self.get('type', 'technical report'),
- self.get('number', "")) ]
+ res = ["%s %s %s" % (self['institution'],
+ self.get('type', 'technical report'),
+ self.get('number', ""))]
if self.get('month') or self.get('year'):
res.append(", %s %s" % (self.get('month', ''),
self.get('year', '')))
@@ -506,15 +506,15 @@ class BibTeXEntry:
if self.get('month') or self.get('year'):
res.append(", %s %s" % (self.get('month', ''),
self.get('year', '')))
- elif self.type == 'book':
- res = [self['publisher']]
- if self.get('year'):
- res.append(" ");
- res.append(self.get('year'));
- # res.append(", %s"%(self.get('year')))
- if self.get('series'):
- res.append(",");
- res.append(self['series']);
+ elif self.type == 'book':
+ res = [self['publisher']]
+ if self.get('year'):
+ res.append(" ")
+ res.append(self.get('year'))
+ # res.append(", %s"%(self.get('year')))
+ if self.get('series'):
+ res.append(",")
+ res.append(self['series'])
elif self.type == 'misc':
res = [self['howpublished']]
if self.get('month') or self.get('year'):
@@ -534,8 +534,8 @@ class BibTeXEntry:
bibtexurl = "./bibtex.html#%s"%url_untranslate(self.key)
res.append((" <span class='availability'>"
- "(<a href='%s'>BibTeX entry</a>)"
- "</span>") %bibtexurl)
+ "(<a href='%s'>BibTeX entry</a>)"
+ "</span>") %bibtexurl)
return htmlize("".join(res))
def to_html(self, cache_path="./cache", base_url="."):
@@ -543,9 +543,9 @@ class BibTeXEntry:
imp = self.isImportant()
draft = self.get('year') == 'forthcoming'
if imp:
- res = ["<li><div class='impEntry'><p class='impEntry'>" ]
+ res = ["<li><div class='impEntry'><p class='impEntry'>"]
elif draft:
- res = ["<li><div class='draftEntry'><p class='draftEntry'>" ]
+ res = ["<li><div class='draftEntry'><p class='draftEntry'>"]
else:
res = ["<li><p class='entry'>"]
@@ -558,47 +558,46 @@ class BibTeXEntry:
res.append(r)
res.append("<span class='title'><a name='%s'>%s</a></span>"%(
- url_untranslate(self.key),htmlize(self['title'])))
+ url_untranslate(self.key), htmlize(self['title'])))
- for cached in 0,1:
+ for cached in 0, 1:
availability = []
if not cached:
- for which in [ "amazon", "excerpt", "publisher" ]:
+ for which in ["amazon", "excerpt", "publisher"]:
key = "www_%s_url"%which
if self.get(key):
- url=self[key]
+ url = self[key]
url = unTeXescapeURL(url)
- availability.append('<a href="%s">%s</a>' %(url,which))
+ availability.append('<a href="%s">%s</a>' %(url, which))
cache_section = self.get('www_cache_section', ".")
if cache_section not in config.CACHE_SECTIONS:
if cache_section != ".":
print >>sys.stderr, "Unrecognized cache section %s"%(
cache_section)
- cache_section="."
+ cache_section = "."
- for key, name, ext in (('www_abstract_url', 'abstract','abstract'),
+ for key, name, ext in (('www_abstract_url', 'abstract', 'abstract'),
('www_html_url', 'HTML', 'html'),
('www_pdf_url', 'PDF', 'pdf'),
('www_ps_url', 'PS', 'ps'),
('www_txt_url', 'TXT', 'txt'),
- ('www_ps_gz_url', 'gzipped PS','ps.gz')
- ):
+ ('www_ps_gz_url', 'gzipped PS', 'ps.gz')):
if cached:
#XXXX the URL needs to be relative to the absolute
#XXXX cache path.
- url = smartJoin(cache_path,cache_section,
- "%s.%s"%(self.key,ext))
+ url = smartJoin(cache_path, cache_section,
+ "%s.%s"%(self.key, ext))
fname = smartJoin(config.OUTPUT_DIR, config.CACHE_DIR,
cache_section,
- "%s.%s"%(self.key,ext))
+ "%s.%s"%(self.key, ext))
if not os.path.exists(fname): continue
else:
url = self.get(key)
if not url: continue
url = unTeXescapeURL(url)
url = url.replace('&', '&amp;')
- availability.append('<a href="%s">%s</a>' %(url,name))
+ availability.append('<a href="%s">%s</a>' %(url, name))
if availability:
res.append([" ", " "][cached])
@@ -610,7 +609,7 @@ class BibTeXEntry:
res.append("<br /><span class='author'>by ")
#res.append("\n<!-- %r -->\n" % self.parsedAuthor)
- htmlAuthors = [ a.htmlizeWithLink() for a in self.parsedAuthor ]
+ htmlAuthors = [a.htmlizeWithLink() for a in self.parsedAuthor]
if len(htmlAuthors) == 1:
res.append(htmlAuthors[0])
@@ -656,14 +655,13 @@ RE_LONE_AMP = re.compile(r'&([^a-z0-9])')
RE_LONE_I = re.compile(r'\\i([^a-z0-9])')
RE_ACCENT = re.compile(r'\\([\'`~^"c])([^{]|{.})')
RE_LIGATURE = re.compile(r'\\(AE|ae|OE|oe|AA|aa|O|o|ss)([^a-z0-9])')
-ACCENT_MAP = { "'" : 'acute',
- "`" : 'grave',
- "~" : 'tilde',
- "^" : 'circ',
- '"' : 'uml',
- "c" : 'cedil',
- }
-UNICODE_MAP = { '&nacute;' : '&#324;', }
+ACCENT_MAP = {"'" : 'acute',
+ "`" : 'grave',
+ "~" : 'tilde',
+ "^" : 'circ',
+ '"' : 'uml',
+ "c" : 'cedil',}
+UNICODE_MAP = {'&nacute;' : '&#324;',}
HTML_LIGATURE_MAP = {
'AE' : '&AElig;',
'ae' : '&aelig;',
@@ -673,30 +671,29 @@ HTML_LIGATURE_MAP = {
'aa' : '&aring;',
'O' : '&Oslash;',
'o' : '&oslash;',
- 'ss' : '&szlig;',
- }
+ 'ss' : '&szlig;',}
RE_TEX_CMD = re.compile(r"(?:\\[a-zA-Z@]+|\\.)")
RE_PAGE_SPAN = re.compile(r"(\d)--(\d)")
def _unaccent(m):
- accent,char = m.groups()
+ accent, char = m.groups()
if char[0] == '{':
char = char[1]
accented = "&%s%s;" % (char, ACCENT_MAP[accent])
return UNICODE_MAP.get(accented, accented)
def _unlig_html(m):
- return "%s%s"%(HTML_LIGATURE_MAP[m.group(1)],m.group(2))
+ return "%s%s"%(HTML_LIGATURE_MAP[m.group(1)], m.group(2))
def htmlize(s):
"""Turn a TeX string into good-looking HTML."""
s = RE_LONE_AMP.sub(lambda m: "&amp;%s" % m.group(1), s)
s = RE_LONE_I.sub(lambda m: "i%s" % m.group(1), s)
s = RE_ACCENT.sub(_unaccent, s)
s = unTeXescapeURL(s)
- s = RE_LIGATURE.sub(_unlig_html, s);
+ s = RE_LIGATURE.sub(_unlig_html, s)
s = RE_TEX_CMD.sub("", s)
s = s.translate(ALLCHARS, "{}")
s = RE_PAGE_SPAN.sub(lambda m: "%s-%s"%(m.groups()), s)
- s = s.replace("---", "&mdash;");
- s = s.replace("--", "&ndash;");
+ s = s.replace("---", "&mdash;")
+ s = s.replace("--", "&ndash;")
return s
def author_url(author):
@@ -715,9 +712,7 @@ def txtize(s):
s = s.translate(ALLCHARS, "{}")
return s
-PROCEEDINGS_RE = re.compile(
- r'((?:proceedings|workshop record) of(?: the)? )(.*)',
- re.I)
+PROCEEDINGS_RE = re.compile(r'((?:proceedings|workshop record) of(?: the)? )(.*)', re.I)
class ParsedAuthor:
"""The parsed name of an author.
@@ -742,9 +737,9 @@ class ParsedAuthor:
def __eq__(self, o):
return ((self.first == o.first) and
- (self.last == o.last) and
- (self.von == o.von) and
- (self.jr == o.jr))
+ (self.last == o.last) and
+ (self.von == o.von) and
+ (self.jr == o.jr))
def __hash__(self):
return hash(repr(self))
@@ -762,7 +757,7 @@ class ParsedAuthor:
if len(self.first) == len(o.first):
n = []
- for a,b in zip(self.first, o.first):
+ for a, b in zip(self.first, o.first):
if a == b:
n.append(a)
elif len(a) == 2 and a[1] == '.' and a[0] == b[0]:
@@ -778,7 +773,7 @@ class ParsedAuthor:
else:
return self
else:
- realname = max([len(n) for n in self.first+o.first])>2
+ realname = max([len(n) for n in self.first+o.first]) > 2
if not realname:
return self
@@ -813,12 +808,12 @@ class ParsedAuthor:
return self
def __repr__(self):
- return "ParsedAuthor(%r,%r,%r,%r)"%(self.first,self.von,
- self.last,self.jr)
+ return "ParsedAuthor(%r,%r,%r,%r)"%(self.first, self.von,
+ self.last, self.jr)
def __str__(self):
a = " ".join(self.first+self.von+self.last)
if self.jr:
- return "%s, %s" % (a,self.jr)
+ return "%s, %s" % (a, self.jr)
return a
def getHomepage(self):
@@ -832,7 +827,7 @@ class ParsedAuthor:
"""Return a representation of this author's name in von-last-first-jr
order, unless overridden by ALPH """
s = self.html
- for pat,v in config.ALPHABETIZE_AUTHOR_AS_RE_LIST:
+ for pat, v in config.ALPHABETIZE_AUTHOR_AS_RE_LIST:
if pat.search(s):
return v
@@ -854,11 +849,11 @@ class ParsedAuthor:
a = self.html
u = self.getHomepage()
if u:
- return "<a href='%s'>%s</a>"%(u,a)
+ return "<a href='%s'>%s</a>"%(u, a)
else:
return a
-def _split(s,w=79,indent=8):
+def _split(s, w=79, indent=8):
r = []
s = re.sub(r"\s+", " ", s)
first = 1
@@ -960,23 +955,23 @@ def _parseAuthor(s):
cur.append(item)
if commas == 0:
- split_von(f,v,l,fvl)
+ split_von(f, v, l, fvl)
else:
f_tmp = []
- split_von(f_tmp,v,l,vl)
+ split_von(f_tmp, v, l, vl)
- parsedAuthors.append(ParsedAuthor(f,v,l,j))
+ parsedAuthors.append(ParsedAuthor(f, v, l, j))
return parsedAuthors
-ALLCHARS = "".join(map(chr,range(256)))
-PRINTINGCHARS = "\t\n\r"+"".join(map(chr,range(32, 127)))
+ALLCHARS = "".join(map(chr, range(256)))
+PRINTINGCHARS = "\t\n\r"+"".join(map(chr, range(32, 127)))
LC_CHARS = "abcdefghijklmnopqrstuvwxyz"
SV_DELCHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"@")
RE_ESCAPED = re.compile(r'\\.')
-def split_von(f,v,l,x):
+def split_von(f, v, l, x):
in_von = 0
while x:
tt = t = x[0]
@@ -1017,8 +1012,8 @@ class Parser:
self.strings.update(initial_strings)
self.newStrings = {}
self.invStrings = {}
- for k,v in config.INITIAL_STRINGS.items():
- self.invStrings[v]=k
+ for k, v in config.INITIAL_STRINGS.items():
+ self.invStrings[v] = k
self.fileiter = fileiter
if result is None:
result = BibTeX()
@@ -1028,7 +1023,7 @@ class Parser:
def _parseKey(self, line):
it = self.fileiter
- line = _advance(it,line)
+ line = _advance(it, line)
m = KEY_RE.match(line)
if not m:
raise ParseError("Expected key at line %s"%self.fileiter.lineno)
@@ -1040,13 +1035,13 @@ class Parser:
bracelevel = 0
data = []
while 1:
- line = _advance(it,line)
+ line = _advance(it, line)
line = line.strip()
assert line
# Literal string?
if line[0] == '"':
- line=line[1:]
+ line = line[1:]
self.litStringLine = it.lineno
while 1:
if bracelevel:
@@ -1120,7 +1115,7 @@ class Parser:
# Got a string, check for concatenation.
if line.isspace() or not line:
data.append(" ")
- line = _advance(it,line)
+ line = _advance(it, line)
line = line.strip()
assert line
if line[0] == '#':
@@ -1135,20 +1130,19 @@ class Parser:
def _parseEntry(self, line): #name, strings, entries
it = self.fileiter
self.entryLine = it.lineno
- line = _advance(it,line)
+ line = _advance(it, line)
m = BRACE_BEGIN_RE.match(line)
if not m:
raise ParseError("Expected an opening brace at line %s"%it.lineno)
line = m.group(1)
- proto = { 'string' : 'p',
- 'preamble' : 'v',
- }.get(self.curEntType, 'kp*')
+ proto = {'string' : 'p',
+ 'preamble' : 'v',}.get(self.curEntType, 'kp*')
v = []
while 1:
- line = _advance(it,line)
+ line = _advance(it, line)
m = BRACE_END_RE.match(line)
if m:
@@ -1166,7 +1160,7 @@ class Parser:
elif proto[0] == 'p':
key, line = self._parseKey(line)
v.append(key)
- line = _advance(it,line)
+ line = _advance(it, line)
line = line.lstrip()
if line[0] == '=':
line = line[1:]
@@ -1180,8 +1174,7 @@ class Parser:
if proto and proto[1:] != '*':
proto = proto[1:]
if proto and proto[1:] != '*':
- raise ParseError("Missing arguments to %s on line %s" % (
- self.curEntType, self.entryLine))
+ raise ParseError("Missing arguments to %s on line %s" % (self.curEntType, self.entryLine))
if self.curEntType == 'string':
self.strings[v[0]] = v[1]
@@ -1192,7 +1185,7 @@ class Parser:
else:
key = v[0]
d = {}
- for i in xrange(1,len(v),2):
+ for i in xrange(1, len(v), 2):
d[v[i].lower()] = v[i+1]
ent = BibTeXEntry(self.curEntType, key, d)
ent.entryLine = self.entryLine
@@ -1235,7 +1228,7 @@ class Parser:
raise ParseError("Bad input at line %s (expected a new entry.)"
% it.lineno)
-def _advance(it,line):
+def _advance(it, line):
while not line or line.isspace() or COMMENT_RE.match(line):
line = it.next()
return line
@@ -1278,14 +1271,13 @@ def parseString(string, result=None):
return r
if __name__ == '__main__':
- if len(sys.argv)>1:
- fname=sys.argv[1]
+ if len(sys.argv) > 1:
+ fname = sys.argv[1]
else:
- fname="testbib/pdos.bib"
+ fname = "testbib/pdos.bib"
r = parseFile(fname)
for e in r.entries:
if e.type in ("proceedings", "journal"): continue
print e.to_html()
-
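The commit message does not say which checker drove this cleanup. As a rough sketch only (assuming the third-party pycodestyle package, which nothing in this repository actually references), the comma-spacing and indentation conventions enforced above could be re-checked like this:

    # Hypothetical lint check, not part of this commit: report how many
    # PEP 8 style issues remain in BibTeX.py (pip install pycodestyle).
    import pycodestyle

    style = pycodestyle.StyleGuide(max_line_length=79)
    report = style.check_files(["BibTeX.py"])
    print("%d style issue(s) remaining" % report.total_errors)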