mirror of
https://github.com/shlomif/PySolFC.git
synced 2025-04-05 00:02:29 -04:00
Simplify po/pysol.pot generation
This commit is contained in:
parent
81432ef353
commit
45f8c312a4
13 changed files with 16014 additions and 14281 deletions
|
@ -19,7 +19,7 @@ include data/pysolfc.glade
|
|||
graft data/themes
|
||||
recursive-exclude data/themes *.py
|
||||
include scripts/build.bat scripts/create_iss.py scripts/mahjongg_utils.py
|
||||
include scripts/pygettext.py scripts/all_games.py scripts/cardset_viewer.py
|
||||
include scripts/all_games.py scripts/cardset_viewer.py
|
||||
include scripts/cardconv scripts/cardsetsgiftobmp
|
||||
include scripts/gen_individual_importing_tests.py
|
||||
include tests/individually-importing/PLACEHOLDER
|
||||
|
|
11
Makefile
11
Makefile
|
@ -2,11 +2,6 @@
|
|||
|
||||
export PYTHONPATH := $(PYTHONPATH):$(CURDIR)
|
||||
|
||||
PYSOLLIB_FILES = pysollib/tk/*.py pysollib/tile/*.py pysollib/*.py \
|
||||
pysollib/games/*.py pysollib/games/special/*.py \
|
||||
pysollib/games/ultra/*.py pysollib/games/mahjongg/*.py \
|
||||
pysollib/kivy/*.py
|
||||
|
||||
.PHONY: all install dist rpm all_games_html rules pot mo pretest test runtest
|
||||
|
||||
all:
|
||||
|
@ -32,10 +27,8 @@ rules:
|
|||
|
||||
pot:
|
||||
./scripts/all_games.py gettext > po/games.pot
|
||||
./scripts/pygettext.py -k n_ --ngettext-keyword ungettext -o po/pysol-1.pot $(PYSOLLIB_FILES)
|
||||
xgettext -L C --keyword=N_ -o po/pysol-2.pot data/glade-translations
|
||||
msgcat po/pysol-1.pot po/pysol-2.pot > po/pysol.pot
|
||||
rm -f po/pysol-1.pot po/pysol-2.pot
|
||||
xgettext --keyword=n_ -o po/pysol.pot \
|
||||
pysollib/*.py pysollib/*/*.py pysollib/*/*/*.py data/pysolfc.glade
|
||||
set -e; \
|
||||
for lng in ru de pl it; do \
|
||||
msgmerge --update --quiet --backup=none po/$${lng}_pysol.po po/pysol.pot; \
|
||||
|
|
|
@ -1,76 +0,0 @@
|
|||
/*
|
||||
* Translatable strings file generated by Glade.
|
||||
* Add this file to your project's POTFILES.in.
|
||||
* DO NOT compile it as part of your application.
|
||||
*/
|
||||
|
||||
gchar *s = N_("Game Statistics");
|
||||
gchar *s = N_("Game:");
|
||||
gchar *s = N_("Won:");
|
||||
gchar *s = N_("Total:");
|
||||
gchar *s = N_("Lost:");
|
||||
gchar *s = N_("Current session");
|
||||
gchar *s = N_("Won:");
|
||||
gchar *s = N_("Lost:");
|
||||
gchar *s = N_("Total:");
|
||||
gchar *s = N_("Total");
|
||||
gchar *s = N_("Current game");
|
||||
gchar *s = N_("Playing time:");
|
||||
gchar *s = N_("Moves:");
|
||||
gchar *s = N_("Total moves:");
|
||||
gchar *s = N_("Minimum");
|
||||
gchar *s = N_("Maximum");
|
||||
gchar *s = N_("Average");
|
||||
gchar *s = N_("Summary");
|
||||
gchar *s = N_("Playing time");
|
||||
gchar *s = N_("Moves");
|
||||
gchar *s = N_("Total moves");
|
||||
gchar *s = N_("Game:");
|
||||
gchar *s = N_("Top 10");
|
||||
gchar *s = N_("All games");
|
||||
gchar *s = N_("Full log");
|
||||
gchar *s = N_("Session log");
|
||||
gchar *s = N_("Set timeouts");
|
||||
gchar *s = N_("Demo:");
|
||||
gchar *s = N_("Hint:");
|
||||
gchar *s = N_("Raise card:");
|
||||
gchar *s = N_("Highlight piles:");
|
||||
gchar *s = N_("Highlight cards:");
|
||||
gchar *s = N_("Highlight same rank:");
|
||||
gchar *s = N_("Set colors");
|
||||
gchar *s = N_("Highlight piles:");
|
||||
gchar *s = N_("Highlight cards 1:");
|
||||
gchar *s = N_("Highlight cards 2:");
|
||||
gchar *s = N_("Highlight same rank 1:");
|
||||
gchar *s = N_("Highlight same rank 2:");
|
||||
gchar *s = N_("Hint arrow:");
|
||||
gchar *s = N_("Highlight not matching:");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Text foreground:");
|
||||
gchar *s = N_("Set font");
|
||||
gchar *s = N_("HTML: ");
|
||||
gchar *s = N_("Small: ");
|
||||
gchar *s = N_("Fixed: ");
|
||||
gchar *s = N_("Tableau default: ");
|
||||
gchar *s = N_("Tableau fixed: ");
|
||||
gchar *s = N_("Tableau small: ");
|
||||
gchar *s = N_("Tableau large: ");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Change...");
|
||||
gchar *s = N_("Sound settings");
|
||||
gchar *s = N_("Sound enabled");
|
||||
gchar *s = N_("Sample volume:");
|
||||
gchar *s = N_("Music volume:");
|
||||
gchar *s = N_("Enable samles");
|
5021
po/de_pysol.po
5021
po/de_pysol.po
File diff suppressed because it is too large
Load diff
11
po/games.pot
11
po/games.pot
|
@ -5,7 +5,7 @@
|
|||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PySol 0.0.1\n"
|
||||
"POT-Creation-Date: Mon Mar 7 21:38:07 2011\n"
|
||||
"POT-Creation-Date: Thu Jul 18 19:36:14 2019\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
|
@ -702,6 +702,9 @@ msgstr ""
|
|||
msgid "Czarina"
|
||||
msgstr ""
|
||||
|
||||
msgid "Daddy Longlegs"
|
||||
msgstr ""
|
||||
|
||||
msgid "Danda"
|
||||
msgstr ""
|
||||
|
||||
|
@ -1272,6 +1275,12 @@ msgstr ""
|
|||
msgid "FreeCell"
|
||||
msgstr ""
|
||||
|
||||
msgid "FreeCell with Two Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "FreeCell with Zero Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "Frog"
|
||||
msgstr ""
|
||||
|
||||
|
|
|
@ -5,10 +5,11 @@
|
|||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: it_games\n"
|
||||
"POT-Creation-Date: Thu Sep 6 15:06:46 2007\n"
|
||||
"POT-Creation-Date: Thu Jul 18 18:43:59 2019\n"
|
||||
"PO-Revision-Date: 2011-05-12 18:46+0200\n"
|
||||
"Last-Translator: Giuliano Colla <giuliano.colla@gmail.com>\n"
|
||||
"Language-Team: Italiano <it@li.org>\n"
|
||||
"Language: \n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
|
@ -75,6 +76,9 @@ msgstr "Conoscenti"
|
|||
msgid "Adela"
|
||||
msgstr "Adelina"
|
||||
|
||||
msgid "Aglet"
|
||||
msgstr ""
|
||||
|
||||
msgid "Agnes Bernauer"
|
||||
msgstr "Agnese Bernauer"
|
||||
|
||||
|
@ -243,6 +247,12 @@ msgstr ""
|
|||
msgid "Bavarian Patience"
|
||||
msgstr ""
|
||||
|
||||
msgid "Bayan"
|
||||
msgstr ""
|
||||
|
||||
msgid "Beacon"
|
||||
msgstr ""
|
||||
|
||||
msgid "Beak and Flipper"
|
||||
msgstr ""
|
||||
|
||||
|
@ -522,6 +532,9 @@ msgstr ""
|
|||
msgid "Cat's Tail"
|
||||
msgstr ""
|
||||
|
||||
msgid "Catherine the Great"
|
||||
msgstr ""
|
||||
|
||||
msgid "Cavalier"
|
||||
msgstr ""
|
||||
|
||||
|
@ -690,6 +703,9 @@ msgstr ""
|
|||
msgid "Czarina"
|
||||
msgstr ""
|
||||
|
||||
msgid "Daddy Longlegs"
|
||||
msgstr ""
|
||||
|
||||
msgid "Danda"
|
||||
msgstr ""
|
||||
|
||||
|
@ -804,6 +820,9 @@ msgstr ""
|
|||
msgid "Double Bisley"
|
||||
msgstr ""
|
||||
|
||||
msgid "Double Blue Moon"
|
||||
msgstr ""
|
||||
|
||||
msgid "Double Canfield"
|
||||
msgstr ""
|
||||
|
||||
|
@ -876,12 +895,18 @@ msgstr ""
|
|||
msgid "Double Measure"
|
||||
msgstr ""
|
||||
|
||||
msgid "Double Montana"
|
||||
msgstr ""
|
||||
|
||||
msgid "Double Pyramid"
|
||||
msgstr ""
|
||||
|
||||
msgid "Double Rail"
|
||||
msgstr ""
|
||||
|
||||
msgid "Double Red Moon"
|
||||
msgstr ""
|
||||
|
||||
msgid "Double Russian Solitaire"
|
||||
msgstr ""
|
||||
|
||||
|
@ -942,6 +967,9 @@ msgstr ""
|
|||
msgid "Dutch Solitaire"
|
||||
msgstr ""
|
||||
|
||||
msgid "Dutchess"
|
||||
msgstr ""
|
||||
|
||||
msgid "Eagle Wing"
|
||||
msgstr ""
|
||||
|
||||
|
@ -972,6 +1000,9 @@ msgstr ""
|
|||
msgid "Eight Off"
|
||||
msgstr ""
|
||||
|
||||
msgid "Eight Packs"
|
||||
msgstr ""
|
||||
|
||||
msgid "Eight Sages"
|
||||
msgstr ""
|
||||
|
||||
|
@ -1158,6 +1189,9 @@ msgstr ""
|
|||
msgid "Flying Dragon"
|
||||
msgstr ""
|
||||
|
||||
msgid "Foothold"
|
||||
msgstr ""
|
||||
|
||||
msgid "Footling"
|
||||
msgstr ""
|
||||
|
||||
|
@ -1242,6 +1276,12 @@ msgstr ""
|
|||
msgid "FreeCell"
|
||||
msgstr ""
|
||||
|
||||
msgid "FreeCell with Two Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "FreeCell with Zero Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "Frog"
|
||||
msgstr ""
|
||||
|
||||
|
@ -1425,6 +1465,9 @@ msgstr ""
|
|||
msgid "Hanoi Puzzle 6"
|
||||
msgstr ""
|
||||
|
||||
msgid "Hanoi Sequence"
|
||||
msgstr ""
|
||||
|
||||
msgid "Happy New Year"
|
||||
msgstr ""
|
||||
|
||||
|
@ -1638,6 +1681,9 @@ msgstr ""
|
|||
msgid "Katrina's Game Relaxed"
|
||||
msgstr ""
|
||||
|
||||
msgid "Kentish"
|
||||
msgstr ""
|
||||
|
||||
msgid "Khadga"
|
||||
msgstr ""
|
||||
|
||||
|
@ -2310,7 +2356,7 @@ msgstr ""
|
|||
msgid "Mahjongg Stargate"
|
||||
msgstr ""
|
||||
|
||||
msgid "Mahjongg Step Pyramid"
|
||||
msgid "Mahjongg Steps Pyramid"
|
||||
msgstr ""
|
||||
|
||||
msgid "Mahjongg Stonehenge"
|
||||
|
@ -2652,6 +2698,15 @@ msgstr ""
|
|||
msgid "Northwest Territory"
|
||||
msgstr ""
|
||||
|
||||
msgid "Not Shisen-Sho 14x6"
|
||||
msgstr ""
|
||||
|
||||
msgid "Not Shisen-Sho 18x8"
|
||||
msgstr ""
|
||||
|
||||
msgid "Not Shisen-Sho 24x12"
|
||||
msgstr ""
|
||||
|
||||
msgid "Number Ten"
|
||||
msgstr ""
|
||||
|
||||
|
@ -2904,6 +2959,9 @@ msgstr ""
|
|||
msgid "Puss in the Corner"
|
||||
msgstr ""
|
||||
|
||||
msgid "Putt Putt"
|
||||
msgstr ""
|
||||
|
||||
msgid "Pyramid"
|
||||
msgstr ""
|
||||
|
||||
|
@ -2925,12 +2983,18 @@ msgstr ""
|
|||
msgid "Quadrangle"
|
||||
msgstr ""
|
||||
|
||||
msgid "Quadrille"
|
||||
msgstr ""
|
||||
|
||||
msgid "Quadruple Alliance"
|
||||
msgstr ""
|
||||
|
||||
msgid "Quads"
|
||||
msgstr ""
|
||||
|
||||
msgid "Quads +"
|
||||
msgstr ""
|
||||
|
||||
msgid "Quartets"
|
||||
msgstr ""
|
||||
|
||||
|
@ -3000,6 +3064,10 @@ msgstr ""
|
|||
msgid "Relax"
|
||||
msgstr ""
|
||||
|
||||
#, fuzzy
|
||||
msgid "Relaxed Accordion"
|
||||
msgstr "Fisarmonica"
|
||||
|
||||
msgid "Relaxed FreeCell"
|
||||
msgstr ""
|
||||
|
||||
|
@ -3015,6 +3083,9 @@ msgstr ""
|
|||
msgid "Relaxed Spider"
|
||||
msgstr ""
|
||||
|
||||
msgid "Relaxed Three Fir-trees"
|
||||
msgstr ""
|
||||
|
||||
msgid "Repair"
|
||||
msgstr ""
|
||||
|
||||
|
@ -3228,13 +3299,13 @@ msgstr ""
|
|||
msgid "Shifting"
|
||||
msgstr ""
|
||||
|
||||
msgid "Shisen-Sho (No Gra) 14x6"
|
||||
msgid "Shisen-Sho (No Gravity) 14x6"
|
||||
msgstr ""
|
||||
|
||||
msgid "Shisen-Sho (No Gra) 18x8"
|
||||
msgid "Shisen-Sho (No Gravity) 18x8"
|
||||
msgstr ""
|
||||
|
||||
msgid "Shisen-Sho (No Gra) 24x12"
|
||||
msgid "Shisen-Sho (No Gravity) 24x12"
|
||||
msgstr ""
|
||||
|
||||
msgid "Shisen-Sho 14x6"
|
||||
|
@ -3423,15 +3494,15 @@ msgstr ""
|
|||
msgid "Stargate"
|
||||
msgstr ""
|
||||
|
||||
msgid "Step Pyramid"
|
||||
msgstr ""
|
||||
|
||||
msgid "Step-Up"
|
||||
msgstr ""
|
||||
|
||||
msgid "Steps"
|
||||
msgstr ""
|
||||
|
||||
msgid "Steps Pyramid"
|
||||
msgstr ""
|
||||
|
||||
msgid "Steve"
|
||||
msgstr ""
|
||||
|
||||
|
@ -3597,6 +3668,9 @@ msgstr ""
|
|||
msgid "Thieves of Egypt"
|
||||
msgstr ""
|
||||
|
||||
msgid "Thirteen Packs"
|
||||
msgstr ""
|
||||
|
||||
msgid "Thirteen Up"
|
||||
msgstr ""
|
||||
|
||||
|
@ -3864,6 +3938,9 @@ msgstr ""
|
|||
msgid "Waning Moon"
|
||||
msgstr ""
|
||||
|
||||
msgid "Wasatch"
|
||||
msgstr ""
|
||||
|
||||
msgid "Washington's Favorite"
|
||||
msgstr ""
|
||||
|
||||
|
@ -3968,4 +4045,3 @@ msgstr ""
|
|||
|
||||
msgid "Zodiac"
|
||||
msgstr ""
|
||||
|
||||
|
|
6337
po/it_pysol.po
6337
po/it_pysol.po
File diff suppressed because it is too large
Load diff
|
@ -4,10 +4,11 @@
|
|||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PySol 0.0.1\n"
|
||||
"POT-Creation-Date: Mon Mar 7 21:38:07 2011\n"
|
||||
"POT-Creation-Date: Thu Jul 18 18:43:59 2019\n"
|
||||
"PO-Revision-Date: 2010-12-16 23:56+0100\n"
|
||||
"Last-Translator: Jerzy Trzeciak <artusek@wp.pl>\n"
|
||||
"Language-Team: Polish <pl@li.org>\n"
|
||||
"Language: pl\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
|
@ -705,6 +706,9 @@ msgstr ""
|
|||
msgid "Czarina"
|
||||
msgstr "Czarina"
|
||||
|
||||
msgid "Daddy Longlegs"
|
||||
msgstr ""
|
||||
|
||||
msgid "Danda"
|
||||
msgstr "Danda"
|
||||
|
||||
|
@ -1277,6 +1281,12 @@ msgstr "Free Napoleon"
|
|||
msgid "FreeCell"
|
||||
msgstr "FreeCell"
|
||||
|
||||
msgid "FreeCell with Two Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "FreeCell with Zero Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "Frog"
|
||||
msgstr "Żaba"
|
||||
|
||||
|
|
6012
po/pl_pysol.po
6012
po/pl_pysol.po
File diff suppressed because it is too large
Load diff
5659
po/pysol.pot
5659
po/pysol.pot
File diff suppressed because it is too large
Load diff
|
@ -5,10 +5,11 @@
|
|||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PySol 0.0.1\n"
|
||||
"POT-Creation-Date: Mon Mar 7 21:38:07 2011\n"
|
||||
"POT-Creation-Date: Thu Jul 18 18:43:59 2019\n"
|
||||
"PO-Revision-Date: 2007-09-05 17:43+0400\n"
|
||||
"Last-Translator: Скоморох <skomoroh@gmail.com>\n"
|
||||
"Language-Team: Russian <ru@li.org>\n"
|
||||
"Language: ru\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: utf-8\n"
|
||||
|
@ -707,6 +708,10 @@ msgstr "Творог и сыворотка"
|
|||
msgid "Czarina"
|
||||
msgstr "Царевна"
|
||||
|
||||
#, fuzzy
|
||||
msgid "Daddy Longlegs"
|
||||
msgstr "Тенистые аллеи"
|
||||
|
||||
#, fuzzy
|
||||
msgid "Danda"
|
||||
msgstr "Алмаз"
|
||||
|
@ -1289,6 +1294,12 @@ msgstr "Свободный Наполеон"
|
|||
msgid "FreeCell"
|
||||
msgstr "Свободная ячейка"
|
||||
|
||||
msgid "FreeCell with Two Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "FreeCell with Zero Reserves"
|
||||
msgstr ""
|
||||
|
||||
msgid "Frog"
|
||||
msgstr "Лягушка"
|
||||
|
||||
|
|
6322
po/ru_pysol.po
6322
po/ru_pysol.po
File diff suppressed because it is too large
Load diff
|
@ -1,725 +0,0 @@
|
|||
#! /usr/bin/env python3
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# Originally written by Barry Warsaw <barry@zope.com>
|
||||
#
|
||||
# Minimally patched to make it even more xgettext compatible
|
||||
# by Peter Funk <pf@artcom-gmbh.de>
|
||||
#
|
||||
# 2002-11-22 Jürgen Hermann <jh@web.de>
|
||||
# Added checks that _() only contains string literals, and
|
||||
# command line args are resolved to module lists, i.e. you
|
||||
# can now pass a filename, a module or package name, or a
|
||||
# directory (including globbing chars, important for Win32).
|
||||
# Made docstring fit in 80 chars wide displays using pydoc.
|
||||
#
|
||||
# 2007-05-11 Scomoroh <scomoroh@gmail.com>
|
||||
# Added very simple support for ngettext
|
||||
#
|
||||
|
||||
import functools
|
||||
import getopt
|
||||
import glob
|
||||
import imp
|
||||
import operator
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import token
|
||||
import tokenize
|
||||
|
||||
from six import PY2, print_
|
||||
# for selftesting
|
||||
try:
|
||||
import fintl
|
||||
_ = fintl.gettext
|
||||
except ImportError:
|
||||
def _(s):
|
||||
return s
|
||||
|
||||
__doc__ = _("""pygettext -- Python equivalent of xgettext(1)
|
||||
|
||||
Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
|
||||
internationalization of C programs. Most of these tools are independent of
|
||||
the programming language and can be used from within Python programs.
|
||||
Martin von Loewis' work[1] helps considerably in this regard.
|
||||
|
||||
There's one problem though; xgettext is the program that scans source code
|
||||
looking for message strings, but it groks only C (or C++). Python
|
||||
introduces a few wrinkles, such as dual quoting characters, triple quoted
|
||||
strings, and raw strings. xgettext understands none of this.
|
||||
|
||||
Enter pygettext, which uses Python's standard tokenize module to scan
|
||||
Python source code, generating .pot files identical to what GNU xgettext[2]
|
||||
generates for C and C++ code. From there, the standard GNU tools can be
|
||||
used.
|
||||
|
||||
A word about marking Python strings as candidates for translation. GNU
|
||||
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
|
||||
and gettext_noop. But those can be a lot of text to include all over your
|
||||
code. C and C++ have a trick: they use the C preprocessor. Most
|
||||
internationalized C source includes a #define for gettext() to _() so that
|
||||
what has to be written in the source is much less. Thus these are both
|
||||
translatable strings:
|
||||
|
||||
gettext("Translatable String")
|
||||
_("Translatable String")
|
||||
|
||||
Python of course has no preprocessor so this doesn't work so well. Thus,
|
||||
pygettext searches only for _() by default, but see the -k/--keyword flag
|
||||
below for how to augment this.
|
||||
|
||||
[1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
|
||||
[2] http://www.gnu.org/software/gettext/gettext.html
|
||||
|
||||
NOTE: pygettext attempts to be option and feature compatible with GNU
|
||||
xgettext where ever possible. However some options are still missing or are
|
||||
not fully implemented. Also, xgettext's use of command line switches with
|
||||
option arguments is broken, and in these cases, pygettext just defines
|
||||
additional switches.
|
||||
|
||||
Usage: pygettext [options] inputfile ...
|
||||
|
||||
Options:
|
||||
|
||||
-a
|
||||
--extract-all
|
||||
Extract all strings.
|
||||
|
||||
-d name
|
||||
--default-domain=name
|
||||
Rename the default output file from messages.pot to name.pot.
|
||||
|
||||
-E
|
||||
--escape
|
||||
Replace non-ASCII characters with octal escape sequences.
|
||||
|
||||
-D
|
||||
--docstrings
|
||||
Extract module, class, method, and function docstrings. These do
|
||||
not need to be wrapped in _() markers, and in fact cannot be for
|
||||
Python to consider them docstrings. (See also the -X option).
|
||||
|
||||
-h
|
||||
--help
|
||||
Print this help message and exit.
|
||||
|
||||
-k word
|
||||
--keyword=word
|
||||
Keywords to look for in addition to the default set, which are:
|
||||
%(DEFAULTKEYWORDS)s
|
||||
|
||||
You can have multiple -k flags on the command line.
|
||||
|
||||
-K
|
||||
--no-default-keywords
|
||||
Disable the default set of keywords (see above). Any keywords
|
||||
explicitly added with the -k/--keyword option are still recognized.
|
||||
|
||||
--no-location
|
||||
Do not write filename/lineno location comments.
|
||||
|
||||
-n
|
||||
--add-location
|
||||
Write filename/lineno location comments indicating where each
|
||||
extracted string is found in the source. These lines appear before
|
||||
each msgid. The style of comments is controlled by the -S/--style
|
||||
option. This is the default.
|
||||
|
||||
-o filename
|
||||
--output=filename
|
||||
Rename the default output file from messages.pot to filename. If
|
||||
filename is `-' then the output is sent to standard out.
|
||||
|
||||
-p dir
|
||||
--output-dir=dir
|
||||
Output files will be placed in directory dir.
|
||||
|
||||
-S stylename
|
||||
--style stylename
|
||||
Specify which style to use for location comments. Two styles are
|
||||
supported:
|
||||
|
||||
Solaris # File: filename, line: line-number
|
||||
GNU #: filename:line
|
||||
|
||||
The style name is case insensitive. GNU style is the default.
|
||||
|
||||
-v
|
||||
--verbose
|
||||
Print the names of the files being processed.
|
||||
|
||||
-V
|
||||
--version
|
||||
Print the version of pygettext and exit.
|
||||
|
||||
-w columns
|
||||
--width=columns
|
||||
Set width of output to columns.
|
||||
|
||||
-x filename
|
||||
--exclude-file=filename
|
||||
Specify a file that contains a list of strings that are not be
|
||||
extracted from the input files. Each string to be excluded must
|
||||
appear on a line by itself in the file.
|
||||
|
||||
-X filename
|
||||
--no-docstrings=filename
|
||||
Specify a file that contains a list of files (one per line) that
|
||||
should not have their docstrings extracted. This is only useful in
|
||||
conjunction with the -D option above.
|
||||
|
||||
If `inputfile' is -, standard input is read.
|
||||
""")
|
||||
|
||||
__version__ = '1.6con'
|
||||
|
||||
default_keywords = ['_']
|
||||
DEFAULTKEYWORDS = ', '.join(default_keywords)
|
||||
default_ngettext_keywords = ['ngettext']
|
||||
|
||||
EMPTYSTRING = ''
|
||||
|
||||
# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
|
||||
# there.
|
||||
pot_header = _('''\
|
||||
# SOME DESCRIPTIVE TITLE.
|
||||
# Copyright (C) YEAR ORGANIZATION
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
|
||||
#
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PACKAGE VERSION\\n"
|
||||
"POT-Creation-Date: %(time)s\\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\\n"
|
||||
"MIME-Version: 1.0\\n"
|
||||
"Content-Type: text/plain; charset=CHARSET\\n"
|
||||
"Content-Transfer-Encoding: ENCODING\\n"
|
||||
"Generated-By: pygettext.py %(version)s\\n"
|
||||
|
||||
''')
|
||||
|
||||
|
||||
def usage(code, msg=''):
|
||||
print_(__doc__ % globals(), file=sys.stderr)
|
||||
if msg:
|
||||
print_(msg, file=sys.stderr)
|
||||
sys.exit(code)
|
||||
|
||||
|
||||
escapes = []
|
||||
|
||||
|
||||
def make_escapes(pass_iso8859):
|
||||
global escapes
|
||||
if pass_iso8859:
|
||||
# Allow iso-8859 characters to pass through so that e.g. 'msgid
|
||||
# "Höhe"' would result not result in 'msgid "H\366he"'. Otherwise we
|
||||
# escape any character outside the 32..126 range.
|
||||
mod = 128
|
||||
else:
|
||||
mod = 256
|
||||
for i in range(256):
|
||||
if 32 <= (i % mod) <= 126:
|
||||
escapes.append(chr(i))
|
||||
else:
|
||||
escapes.append("\\%03o" % i)
|
||||
escapes[ord('\\')] = '\\\\'
|
||||
escapes[ord('\t')] = '\\t'
|
||||
escapes[ord('\r')] = '\\r'
|
||||
escapes[ord('\n')] = '\\n'
|
||||
escapes[ord('\"')] = '\\"'
|
||||
|
||||
|
||||
def escape(s):
|
||||
global escapes
|
||||
s = list(s)
|
||||
for i in range(len(s)):
|
||||
s[i] = escapes[ord(s[i])]
|
||||
return EMPTYSTRING.join(s)
|
||||
|
||||
|
||||
def safe_eval(s):
|
||||
# unwrap quotes, safely
|
||||
return eval(s, {'__builtins__': {}}, {})
|
||||
|
||||
|
||||
def normalize(s):
|
||||
# This converts the various Python string types into a format that is
|
||||
# appropriate for .po files, namely much closer to C style.
|
||||
lines = s.split('\n')
|
||||
if len(lines) == 1:
|
||||
s = '"' + escape(s) + '"'
|
||||
else:
|
||||
if not lines[-1]:
|
||||
del lines[-1]
|
||||
lines[-1] = lines[-1] + '\n'
|
||||
for i in range(len(lines)):
|
||||
lines[i] = escape(lines[i])
|
||||
lineterm = '\\n"\n"'
|
||||
s = '""\n"' + lineterm.join(lines) + '"'
|
||||
return s
|
||||
|
||||
|
||||
def containsAny(str, set):
|
||||
"""Check whether 'str' contains ANY of the chars in 'set'"""
|
||||
return 1 in [c in str for c in set]
|
||||
|
||||
|
||||
def _visit_pyfiles(list, dirname, names):
|
||||
"""Helper for getFilesForName()."""
|
||||
# get extension for python source files
|
||||
if '_py_ext' not in globals():
|
||||
global _py_ext
|
||||
_py_ext = [triple[0] for triple in imp.get_suffixes()
|
||||
if triple[2] == imp.PY_SOURCE][0]
|
||||
|
||||
# don't recurse into CVS directories
|
||||
if 'CVS' in names:
|
||||
names.remove('CVS')
|
||||
|
||||
# add all *.py files to list
|
||||
list.extend(
|
||||
[os.path.join(dirname, file) for file in names
|
||||
if os.path.splitext(file)[1] == _py_ext]
|
||||
)
|
||||
|
||||
|
||||
def _get_modpkg_path(dotted_name, pathlist=None):
|
||||
"""Get the filesystem path for a module or a package.
|
||||
|
||||
Return the file system path to a file for a module, and to a directory for
|
||||
a package. Return None if the name is not found, or is a builtin or
|
||||
extension module.
|
||||
"""
|
||||
# split off top-most name
|
||||
parts = dotted_name.split('.', 1)
|
||||
|
||||
if len(parts) > 1:
|
||||
# we have a dotted path, import top-level package
|
||||
try:
|
||||
file, pathname, description = imp.find_module(parts[0], pathlist)
|
||||
if file:
|
||||
file.close()
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
# check if it's indeed a package
|
||||
if description[2] == imp.PKG_DIRECTORY:
|
||||
# recursively handle the remaining name parts
|
||||
pathname = _get_modpkg_path(parts[1], [pathname])
|
||||
else:
|
||||
pathname = None
|
||||
else:
|
||||
# plain name
|
||||
try:
|
||||
file, pathname, description = imp.find_module(
|
||||
dotted_name, pathlist)
|
||||
if file:
|
||||
file.close()
|
||||
if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
|
||||
pathname = None
|
||||
except ImportError:
|
||||
pathname = None
|
||||
|
||||
return pathname
|
||||
|
||||
|
||||
def getFilesForName(name):
|
||||
"""Get a list of module files for a filename, a module or package name,
|
||||
or a directory.
|
||||
"""
|
||||
if not os.path.exists(name):
|
||||
# check for glob chars
|
||||
if containsAny(name, "*?[]"):
|
||||
files = glob.glob(name)
|
||||
list = []
|
||||
for file in files:
|
||||
list.extend(getFilesForName(file))
|
||||
return list
|
||||
|
||||
# try to find module or package
|
||||
name = _get_modpkg_path(name)
|
||||
if not name:
|
||||
return []
|
||||
|
||||
if os.path.isdir(name):
|
||||
# find all python files in directory
|
||||
list = []
|
||||
os.path.walk(name, _visit_pyfiles, list)
|
||||
return list
|
||||
elif os.path.exists(name):
|
||||
# a single file
|
||||
return [name]
|
||||
|
||||
return []
|
||||
|
||||
|
||||
class TokenEater:
|
||||
def __init__(self, options):
|
||||
self.__options = options
|
||||
self.__messages = {}
|
||||
self.__state = self.__waiting
|
||||
self.__data = []
|
||||
self.__lineno = -1
|
||||
self.__freshmodule = 1
|
||||
self.__curfile = None
|
||||
self.__ngettext = False
|
||||
|
||||
def __call__(self, ttype, tstring, stup, etup, line):
|
||||
# dispatch
|
||||
# import token
|
||||
# print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
|
||||
# 'tstring:', tstring
|
||||
self.__state(ttype, tstring, stup[0])
|
||||
|
||||
def __waiting(self, ttype, tstring, lineno):
|
||||
opts = self.__options
|
||||
# Do docstring extractions, if enabled
|
||||
if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
|
||||
# module docstring?
|
||||
if self.__freshmodule:
|
||||
if ttype == tokenize.STRING:
|
||||
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
|
||||
self.__freshmodule = 0
|
||||
elif ttype not in (tokenize.COMMENT, tokenize.NL):
|
||||
self.__freshmodule = 0
|
||||
return
|
||||
# class docstring?
|
||||
if ttype == tokenize.NAME and tstring in ('class', 'def'):
|
||||
self.__state = self.__suiteseen
|
||||
return
|
||||
if ttype == tokenize.NAME and tstring in opts.keywords:
|
||||
self.__state = self.__keywordseen
|
||||
self.__ngettext = tstring in opts.ngettext_keywords
|
||||
|
||||
def __suiteseen(self, ttype, tstring, lineno):
|
||||
# ignore anything until we see the colon
|
||||
if ttype == tokenize.OP and tstring == ':':
|
||||
self.__state = self.__suitedocstring
|
||||
|
||||
def __suitedocstring(self, ttype, tstring, lineno):
|
||||
# ignore any intervening noise
|
||||
if ttype == tokenize.STRING:
|
||||
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
|
||||
self.__state = self.__waiting
|
||||
elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
|
||||
tokenize.COMMENT):
|
||||
# there was no class docstring
|
||||
self.__state = self.__waiting
|
||||
|
||||
def __keywordseen(self, ttype, tstring, lineno):
|
||||
if ttype == tokenize.OP and tstring == '(':
|
||||
self.__data = []
|
||||
self.__lineno = lineno
|
||||
self.__state = self.__openseen
|
||||
else:
|
||||
self.__state = self.__waiting
|
||||
|
||||
def __openseen(self, ttype, tstring, lineno):
|
||||
if ttype == tokenize.OP and tstring == ')':
|
||||
# We've seen the last of the translatable strings. Record the
|
||||
# line number of the first line of the strings and update the list
|
||||
# of messages seen. Reset state for the next batch. If there
|
||||
# were no strings inside _(), then just ignore this entry.
|
||||
if self.__data:
|
||||
if self.__ngettext:
|
||||
data = []
|
||||
msg = []
|
||||
for s in self.__data:
|
||||
if s is not None:
|
||||
msg.append(s)
|
||||
else:
|
||||
data.append(EMPTYSTRING.join(msg))
|
||||
msg = []
|
||||
if len(data) == 2 and data[0] and data[1]:
|
||||
self.__addentry(tuple(data))
|
||||
elif self.__options.verbose:
|
||||
print_(_(
|
||||
'*** %(file)s:%(lineno)s: incorrect '
|
||||
'ngettext format'
|
||||
) % {
|
||||
'file': self.__curfile,
|
||||
'lineno': self.__lineno}, file=sys.stderr)
|
||||
else:
|
||||
self.__addentry(EMPTYSTRING.join(self.__data))
|
||||
self.__state = self.__waiting
|
||||
elif ttype == tokenize.STRING:
|
||||
self.__data.append(safe_eval(tstring))
|
||||
elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
|
||||
token.NEWLINE, tokenize.NL]:
|
||||
if self.__ngettext and ttype == tokenize.OP and tstring == ',':
|
||||
self.__data.append(None)
|
||||
elif self.__ngettext: # and ttype == tokenize.NUMBER:
|
||||
pass
|
||||
else:
|
||||
# warn if we see anything else than STRING or whitespace
|
||||
if self.__options.verbose:
|
||||
print_(_(
|
||||
'*** %(file)s:%(lineno)s: Seen unexpected '
|
||||
'token "%(token)s"'
|
||||
) % {
|
||||
'token': tstring,
|
||||
'file': self.__curfile,
|
||||
'lineno': self.__lineno
|
||||
}, file=sys.stderr)
|
||||
self.__state = self.__waiting
|
||||
|
||||
def __addentry(self, msg, lineno=None, isdocstring=0):
|
||||
if lineno is None:
|
||||
lineno = self.__lineno
|
||||
if msg not in self.__options.toexclude:
|
||||
entry = (self.__curfile, lineno)
|
||||
self.__messages.setdefault(msg, {})[entry] = isdocstring
|
||||
|
||||
def set_filename(self, filename):
|
||||
self.__curfile = filename
|
||||
self.__freshmodule = 1
|
||||
|
||||
def write(self, fp):
|
||||
options = self.__options
|
||||
timestamp = time.ctime(time.time())
|
||||
# The time stamp in the header doesn't have the same format as that
|
||||
# generated by xgettext...
|
||||
print_(pot_header % {'time': timestamp, 'version': __version__},
|
||||
file=fp)
|
||||
# Sort the entries. First sort each particular entry's keys, then
|
||||
# sort all the entries by their first item.
|
||||
reverse = {}
|
||||
for k, v in self.__messages.items():
|
||||
keys = list(v.keys())
|
||||
keys.sort()
|
||||
reverse.setdefault(tuple(keys), []).append((k, v))
|
||||
rkeys = list(reverse.keys())
|
||||
rkeys.sort()
|
||||
for rkey in rkeys:
|
||||
rentries = reverse[rkey]
|
||||
rentries.sort()
|
||||
for k, v in rentries:
|
||||
isdocstring = 0
|
||||
# If the entry was gleaned out of a docstring, then add a
|
||||
# comment stating so. This is to aid translators who may wish
|
||||
# to skip translating some unimportant docstrings.
|
||||
if functools.reduce(operator.__add__, v.values()):
|
||||
isdocstring = 1
|
||||
# k is the message string, v is a dictionary-set of (filename,
|
||||
# lineno) tuples. We want to sort the entries in v first by
|
||||
# file name and then by line number.
|
||||
v = list(v.keys())
|
||||
v.sort()
|
||||
if not options.writelocations:
|
||||
pass
|
||||
# location comments are different b/w Solaris and GNU:
|
||||
elif options.locationstyle == options.SOLARIS:
|
||||
for filename, lineno in v:
|
||||
d = {'filename': filename, 'lineno': lineno}
|
||||
print_(_('# File: %(filename)s, line: %(lineno)d') % d,
|
||||
file=fp)
|
||||
elif options.locationstyle == options.GNU:
|
||||
# fit as many locations on one line, as long as the
|
||||
# resulting line length doesn't exceeds 'options.width'
|
||||
locline = '#:'
|
||||
for filename, lineno in v:
|
||||
d = {'filename': filename, 'lineno': lineno}
|
||||
s = _(' %(filename)s:%(lineno)d') % d
|
||||
if len(locline) + len(s) <= options.width:
|
||||
locline = locline + s
|
||||
else:
|
||||
print_(locline, file=fp)
|
||||
locline = "#:" + s
|
||||
if len(locline) > 2:
|
||||
print_(locline, file=fp)
|
||||
if isdocstring:
|
||||
print_('#, docstring', file=fp)
|
||||
if isinstance(k, str):
|
||||
print_('msgid', normalize(k), file=fp)
|
||||
print_('msgstr ""\n', file=fp)
|
||||
else:
|
||||
# ngettext
|
||||
assert isinstance(k, tuple)
|
||||
assert len(k) == 2
|
||||
print_('msgid', normalize(k[0]), file=fp)
|
||||
print_('msgid_plural', normalize(k[1]), file=fp)
|
||||
print_('msgstr[0] ""', file=fp)
|
||||
print_('msgstr[1] ""\n', file=fp)
|
||||
|
||||
|
||||
def main():
    """Command-line entry point: parse xgettext-style options, tokenize
    the given Python sources, and write the extracted messages as a .pot
    file via a TokenEater instance.
    """
    global default_keywords
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            'ad:DEhk:Kno:p:S:Vvw:x:X:',
            ['extract-all', 'default-domain=', 'escape', 'help',
             'keyword=', 'no-default-keywords', 'ngettext-keyword=',
             'add-location', 'no-location', 'output=', 'output-dir=',
             'style=', 'verbose', 'version', 'width=', 'exclude-file=',
             'docstrings', 'no-docstrings',
             ])
    except getopt.error as msg:
        usage(1, msg)

    # for holding option values
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0  # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = []
        ngettext_keywords = []
        outpath = ''
        outfile = 'messages.pot'
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}

    options = Options()
    locations = {'gnu': options.GNU,
                 'solaris': options.SOLARIS,
                 }

    # parse options
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--extract-all'):
            options.extractall = 1
        elif opt in ('-d', '--default-domain'):
            options.outfile = arg + '.pot'
        elif opt in ('-E', '--escape'):
            options.escape = 1
        elif opt in ('-D', '--docstrings'):
            options.docstrings = 1
        elif opt in ('-k', '--keyword'):
            options.keywords.append(arg)
        elif opt in ('--ngettext-keyword',):
            # BUGFIX: must be a one-element tuple.  The previous bare
            # parenthesized form ('--ngettext-keyword') is just a string,
            # turning this into a substring test that wrongly matched
            # '-n' (--add-location) before its own branch below.
            options.ngettext_keywords.append(arg)
        elif opt in ('-K', '--no-default-keywords'):
            default_keywords = []
        elif opt in ('-n', '--add-location'):
            options.writelocations = 1
        elif opt in ('--no-location',):
            options.writelocations = 0
        elif opt in ('-S', '--style'):
            options.locationstyle = locations.get(arg.lower())
            if options.locationstyle is None:
                usage(1, _('Invalid value for --style: %s') % arg)
        elif opt in ('-o', '--output'):
            options.outfile = arg
        elif opt in ('-p', '--output-dir'):
            options.outpath = arg
        elif opt in ('-v', '--verbose'):
            options.verbose = 1
        elif opt in ('-V', '--version'):
            print(_('pygettext.py (xgettext for Python) %s') % __version__)
            sys.exit(0)
        elif opt in ('-w', '--width'):
            try:
                options.width = int(arg)
            except ValueError:
                usage(1, _('--width argument must be an integer: %s') % arg)
        elif opt in ('-x', '--exclude-file'):
            options.excludefilename = arg
        elif opt in ('-X', '--no-docstrings'):
            fp = open(arg)
            try:
                while 1:
                    line = fp.readline()
                    if not line:
                        break
                    options.nodocstrings[line[:-1]] = 1
            finally:
                fp.close()

    # calculate escapes
    make_escapes(options.escape)

    # calculate all keywords
    options.keywords.extend(default_keywords)

    options.ngettext_keywords.extend(default_ngettext_keywords)
    options.keywords.extend(options.ngettext_keywords)

    # initialize list of strings to exclude
    if options.excludefilename:
        try:
            fp = open(options.excludefilename)
            options.toexclude = fp.readlines()
            fp.close()
        except IOError:
            print_(_("Can't read --exclude-file: %s") %
                   options.excludefilename, file=sys.stderr)
            sys.exit(1)
    else:
        options.toexclude = []

    # resolve args to module lists
    expanded = []
    for arg in args:
        if arg == '-':
            expanded.append(arg)
        else:
            expanded.extend(getFilesForName(arg))
    args = expanded

    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print(_('Reading standard input'))
            fp = sys.stdin
            closep = 0
        else:
            if options.verbose:
                print(_('Working on %s') % filename)
            fp = open(filename, 'rb')
            closep = 1
        try:
            eater.set_filename(filename)
            try:
                if PY2:
                    for token_info in tokenize.generate_tokens(fp.readline):
                        eater(*token_info)
                else:
                    for token_info in tokenize.tokenize(fp.readline):
                        eater(*token_info)
            except tokenize.TokenError as e:
                # BUGFIX: exceptions are not subscriptable under Python 3;
                # TokenError args are (message, (row, col)), so use e.args.
                print_('%s: %s, line %d, column %d' % (
                    e.args[0], filename, e.args[1][0], e.args[1][1]),
                    file=sys.stderr)
            except getattr(tokenize, 'StopTokenizing', ()):
                # tokenize.StopTokenizing exists only in Python 2's
                # callback-based tokenize API; the getattr fallback to an
                # empty tuple catches nothing on Python 3 instead of
                # raising AttributeError while handling another exception.
                pass
        finally:
            if closep:
                fp.close()

    # write the output
    if options.outfile == '-':
        fp = sys.stdout
        closep = 0
    else:
        if options.outpath:
            options.outfile = os.path.join(options.outpath, options.outfile)
        fp = open(options.outfile, 'w')
        closep = 1
    try:
        eater.write(fp)
    finally:
        if closep:
            fp.close()
||||
if __name__ == '__main__':
    main()
    # some more test strings: deliberate _() calls so this script can be
    # run on itself to exercise the extractor (only reached after main()
    # returns).  Do not remove or "fix" them.
    _('a unicode string')
    # this one creates a warning
    _('*** Seen unexpected token "%(token)s"') % {'token': 'test'}
    _('more' 'than' 'one' 'string')
Loading…
Add table
Reference in a new issue