1
0
Fork 0
mirror of https://github.com/shlomif/PySolFC.git synced 2025-04-15 02:54:09 -04:00

Maintain compatibility with Python 2

This commit is contained in:
Roderik Ploszek 2018-03-20 23:03:53 +01:00
parent 0bd7b44ed5
commit 57485fac95
2 changed files with 28 additions and 23 deletions

View file

@@ -6,7 +6,7 @@ import sys
import os import os
import time import time
# from pprint import pprint # from pprint import pprint
import builtins from six.moves import builtins
from pysollib.mygettext import fix_gettext from pysollib.mygettext import fix_gettext
import pysollib.games import pysollib.games
import pysollib.games.special import pysollib.games.special

View file

@@ -26,6 +26,7 @@ import tokenize
import operator import operator
import sys import sys
import functools import functools
from six import print_, PY2
# for selftesting # for selftesting
try: try:
@@ -201,9 +202,9 @@ msgstr ""
def usage(code, msg=''): def usage(code, msg=''):
print(__doc__ % globals(), file=sys.stderr) print_(__doc__ % globals(), file=sys.stderr)
if msg: if msg:
print(msg, file=sys.stderr) print_(msg, file=sys.stderr)
sys.exit(code) sys.exit(code)
@@ -435,7 +436,7 @@ class TokenEater:
if len(data) == 2 and data[0] and data[1]: if len(data) == 2 and data[0] and data[1]:
self.__addentry(tuple(data)) self.__addentry(tuple(data))
elif self.__options.verbose: elif self.__options.verbose:
print(_( print_(_(
'*** %(file)s:%(lineno)s: incorrect ' '*** %(file)s:%(lineno)s: incorrect '
'ngettext format' 'ngettext format'
) % { ) % {
@@ -455,7 +456,7 @@ class TokenEater:
else: else:
# warn if we see anything else than STRING or whitespace # warn if we see anything else than STRING or whitespace
if self.__options.verbose: if self.__options.verbose:
print(_( print_(_(
'*** %(file)s:%(lineno)s: Seen unexpected ' '*** %(file)s:%(lineno)s: Seen unexpected '
'token "%(token)s"' 'token "%(token)s"'
) % { ) % {
@@ -481,7 +482,7 @@ class TokenEater:
timestamp = time.ctime(time.time()) timestamp = time.ctime(time.time())
# The time stamp in the header doesn't have the same format as that # The time stamp in the header doesn't have the same format as that
# generated by xgettext... # generated by xgettext...
print(pot_header % {'time': timestamp, 'version': __version__}, print_(pot_header % {'time': timestamp, 'version': __version__},
file=fp) file=fp)
# Sort the entries. First sort each particular entry's keys, then # Sort the entries. First sort each particular entry's keys, then
# sort all the entries by their first item. # sort all the entries by their first item.
@@ -513,7 +514,7 @@ class TokenEater:
elif options.locationstyle == options.SOLARIS: elif options.locationstyle == options.SOLARIS:
for filename, lineno in v: for filename, lineno in v:
d = {'filename': filename, 'lineno': lineno} d = {'filename': filename, 'lineno': lineno}
print(_('# File: %(filename)s, line: %(lineno)d') % d, print_(_('# File: %(filename)s, line: %(lineno)d') % d,
file=fp) file=fp)
elif options.locationstyle == options.GNU: elif options.locationstyle == options.GNU:
# fit as many locations on one line, as long as the # fit as many locations on one line, as long as the
@@ -525,23 +526,23 @@ class TokenEater:
if len(locline) + len(s) <= options.width: if len(locline) + len(s) <= options.width:
locline = locline + s locline = locline + s
else: else:
print(locline, file=fp) print_(locline, file=fp)
locline = "#:" + s locline = "#:" + s
if len(locline) > 2: if len(locline) > 2:
print(locline, file=fp) print_(locline, file=fp)
if isdocstring: if isdocstring:
print('#, docstring', file=fp) print_('#, docstring', file=fp)
if isinstance(k, str): if isinstance(k, str):
print('msgid', normalize(k), file=fp) print_('msgid', normalize(k), file=fp)
print('msgstr ""\n', file=fp) print_('msgstr ""\n', file=fp)
else: else:
# ngettext # ngettext
assert isinstance(k, tuple) assert isinstance(k, tuple)
assert len(k) == 2 assert len(k) == 2
print('msgid', normalize(k[0]), file=fp) print_('msgid', normalize(k[0]), file=fp)
print('msgid_plural', normalize(k[1]), file=fp) print_('msgid_plural', normalize(k[1]), file=fp)
print('msgstr[0] ""', file=fp) print_('msgstr[0] ""', file=fp)
print('msgstr[1] ""\n', file=fp) print_('msgstr[1] ""\n', file=fp)
def main(): def main():
@@ -653,8 +654,8 @@ def main():
options.toexclude = fp.readlines() options.toexclude = fp.readlines()
fp.close() fp.close()
except IOError: except IOError:
print(_("Can't read --exclude-file: %s") % options.excludefilename, print_(_("Can't read --exclude-file: %s") %
file=sys.stderr) options.excludefilename, file=sys.stderr)
sys.exit(1) sys.exit(1)
else: else:
options.toexclude = [] options.toexclude = []
@@ -684,10 +685,14 @@ def main():
try: try:
eater.set_filename(filename) eater.set_filename(filename)
try: try:
if PY2:
for token_info in tokenize.generate_tokens(fp.readline):
eater(*token_info)
else:
for token_info in tokenize.tokenize(fp.readline): for token_info in tokenize.tokenize(fp.readline):
eater(*token_info) eater(*token_info)
except tokenize.TokenError as e: except tokenize.TokenError as e:
print('%s: %s, line %d, column %d' % ( print_('%s: %s, line %d, column %d' % (
e[0], filename, e[1][0], e[1][1]), file=sys.stderr) e[0], filename, e[1][0], e[1][1]), file=sys.stderr)
except tokenize.StopTokenizing: except tokenize.StopTokenizing:
pass pass