Mirror of https://github.com/shlomif/PySolFC.git
Synced 2025-04-05 00:02:29 -04:00
Convert scripts/pygettext.py to python 3
This commit is contained in:
parent f1161cdae0
commit 92a5a341c5

1 changed file with 33 additions and 33 deletions

scripts/pygettext.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 # -*- coding: iso-8859-1 -*-
 # Originally written by Barry Warsaw <barry@zope.com>
 #
@@ -170,9 +170,6 @@ Options:
 If `inputfile' is -, standard input is read.
 """)
 
-if sys.version_info > (3,):
-    basestring = str
-
 __version__ = '1.6con'
 
 default_keywords = ['_']
@@ -204,9 +201,9 @@ msgstr ""
 
 
 def usage(code, msg=''):
-    print >> sys.stderr, __doc__ % globals()
+    print(__doc__ % globals(), file=sys.stderr)
     if msg:
-        print >> sys.stderr, msg
+        print(msg, file=sys.stderr)
     sys.exit(code)
 
 
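Note: the `print >> stream` statement removed above is Python 2-only syntax; this commit rewrites every occurrence as a call to the print() function. A minimal sketch of the equivalence (the message text is illustrative):

    import sys

    # Python 2 statement form (a SyntaxError in Python 3):
    #   print >> sys.stderr, "usage: ..."
    # Python 3 function form, used throughout this commit:
    print("usage: ...", file=sys.stderr)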
@@ -438,12 +435,12 @@ class TokenEater:
             if len(data) == 2 and data[0] and data[1]:
                 self.__addentry(tuple(data))
             elif self.__options.verbose:
-                print >> sys.stderr, _(
+                print(_(
                     '*** %(file)s:%(lineno)s: incorrect '
                     'ngettext format'
                 ) % {
                     'file': self.__curfile,
-                    'lineno': self.__lineno}
+                    'lineno': self.__lineno}, file=sys.stderr)
         else:
             self.__addentry(EMPTYSTRING.join(self.__data))
         self.__state = self.__waiting
@@ -458,14 +455,14 @@ class TokenEater:
         else:
             # warn if we see anything else than STRING or whitespace
             if self.__options.verbose:
-                print >> sys.stderr, _(
+                print(_(
                     '*** %(file)s:%(lineno)s: Seen unexpected '
                     'token "%(token)s"'
                 ) % {
                     'token': tstring,
                     'file': self.__curfile,
                     'lineno': self.__lineno
-                }
+                }, file=sys.stderr)
             self.__state = self.__waiting
 
     def __addentry(self, msg, lineno=None, isdocstring=0):
@@ -484,15 +481,15 @@ class TokenEater:
         timestamp = time.ctime(time.time())
         # The time stamp in the header doesn't have the same format as that
         # generated by xgettext...
-        print >> fp, pot_header % {'time': timestamp, 'version': __version__}
+        print(pot_header % {'time': timestamp, 'version': __version__}, file=fp)
         # Sort the entries. First sort each particular entry's keys, then
         # sort all the entries by their first item.
         reverse = {}
         for k, v in self.__messages.items():
-            keys = v.keys()
+            keys = list(v.keys())
             keys.sort()
             reverse.setdefault(tuple(keys), []).append((k, v))
-        rkeys = reverse.keys()
+        rkeys = list(reverse.keys())
         rkeys.sort()
         for rkey in rkeys:
             rentries = reverse[rkey]
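The list(...) wrappers are needed because Python 3's dict.keys() returns a view object that has no .sort() method. A small sketch of the difference (the dictionary contents are illustrative):

    messages = {'b': 1, 'a': 2}

    keys = messages.keys()        # Python 3: a dict_keys view, not a list
    # keys.sort()                 # would raise AttributeError on a view

    keys = list(messages.keys())  # copy into a real list first
    keys.sort()                   # in-place sort now works, as in the diff
    # equivalent, and more idiomatic in one step:
    keys = sorted(messages)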
@@ -507,7 +504,7 @@ class TokenEater:
                 # k is the message string, v is a dictionary-set of (filename,
                 # lineno) tuples.  We want to sort the entries in v first by
                 # file name and then by line number.
-                v = v.keys()
+                v = list(v.keys())
                 v.sort()
                 if not options.writelocations:
                     pass
@@ -515,8 +512,8 @@ class TokenEater:
                 elif options.locationstyle == options.SOLARIS:
                     for filename, lineno in v:
                         d = {'filename': filename, 'lineno': lineno}
-                        print >>fp, _(
-                            '# File: %(filename)s, line: %(lineno)d') % d
+                        print(_(
+                            '# File: %(filename)s, line: %(lineno)d') % d, file=fp)
                 elif options.locationstyle == options.GNU:
                     # fit as many locations on one line, as long as the
                     # resulting line length doesn't exceeds 'options.width'
@@ -527,23 +524,23 @@ class TokenEater:
                         if len(locline) + len(s) <= options.width:
                             locline = locline + s
                         else:
-                            print >> fp, locline
+                            print(locline, file=fp)
                             locline = "#:" + s
                     if len(locline) > 2:
-                        print >> fp, locline
+                        print(locline, file=fp)
                 if isdocstring:
-                    print >> fp, '#, docstring'
-                if isinstance(k, basestring):
-                    print >> fp, 'msgid', normalize(k)
-                    print >> fp, 'msgstr ""\n'
+                    print('#, docstring', file=fp)
+                if isinstance(k, str):
+                    print('msgid', normalize(k), file=fp)
+                    print('msgstr ""\n', file=fp)
                 else:
                     # ngettext
                     assert isinstance(k, tuple)
                     assert len(k) == 2
-                    print >> fp, 'msgid', normalize(k[0])
-                    print >> fp, 'msgid_plural', normalize(k[1])
-                    print >> fp, 'msgstr[0] ""'
-                    print >> fp, 'msgstr[1] ""\n'
+                    print('msgid', normalize(k[0]), file=fp)
+                    print('msgid_plural', normalize(k[1]), file=fp)
+                    print('msgstr[0] ""', file=fp)
+                    print('msgstr[1] ""\n', file=fp)
 
 
 def main():
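Python 3 removes basestring (and the separate unicode type), so str is the only text type and the isinstance check above is rewritten accordingly. A quick sketch (the sample values are illustrative):

    # Python 2: isinstance(k, basestring) matched both str and unicode.
    # Python 3: a single str type covers all text.
    for k in ["msgid text", ("singular", "plural")]:
        if isinstance(k, str):
            print("plain msgid")
        else:
            print("ngettext (singular, plural) pair")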
@@ -655,8 +652,8 @@ def main():
             options.toexclude = fp.readlines()
             fp.close()
         except IOError:
-            print >> sys.stderr, _(
-                "Can't read --exclude-file: %s") % options.excludefilename
+            print(_(
+                "Can't read --exclude-file: %s") % options.excludefilename, file=sys.stderr)
             sys.exit(1)
     else:
         options.toexclude = []
@@ -681,15 +678,18 @@ def main():
         else:
             if options.verbose:
                 print(_('Working on %s') % filename)
-            fp = open(filename)
+            fp = open(filename, 'rb')
             closep = 1
         try:
             eater.set_filename(filename)
             try:
-                tokenize.tokenize(fp.readline, eater)
+                for token_info in tokenize.tokenize(fp.readline):
+                    eater(*token_info)
             except tokenize.TokenError as e:
-                print >> sys.stderr, '%s: %s, line %d, column %d' % (
-                    e[0], filename, e[1][0], e[1][1])
+                print('%s: %s, line %d, column %d' % (
+                    e[0], filename, e[1][0], e[1][1]), file=sys.stderr)
             except StopTokenizing:
                 pass
         finally:
             if closep:
                 fp.close()
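This hunk carries the largest behavioural change: Python 2's tokenize.tokenize(readline, tokeneater) drove a callback, while Python 3's tokenize.tokenize(readline) returns an iterator of TokenInfo 5-tuples and expects a readline that yields bytes, which is why the file is now opened in 'rb' mode. A minimal sketch of the new loop (the file name is illustrative):

    import tokenize

    with open('example.py', 'rb') as fp:  # binary mode is required
        for tok in tokenize.tokenize(fp.readline):
            # TokenInfo unpacks to (type, string, start, end, line),
            # the same five arguments the old Python 2 callback received
            ttype, tstring, start, end, line = tok
            print(tokenize.tok_name[ttype], repr(tstring))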
@@ -713,7 +713,7 @@ def main():
 if __name__ == '__main__':
     main()
     # some more test strings
-    _(u'a unicode string')
+    _('a unicode string')
     # this one creates a warning
     _('*** Seen unexpected token "%(token)s"') % {'token': 'test'}
     _('more' 'than' 'one' 'string')