Commit 11745516 authored by Andreas Jung's avatar Andreas Jung

docutils update

parent f17718be
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:00 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:30 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -23,12 +23,6 @@ Modules:
- nodes.py: Docutils document tree (doctree) node class library.
- optik.py: Option parsing and command-line help; from Greg Ward's
http://optik.sf.net/ project, included for convenience.
- roman.py: Conversion to and from Roman numerals. Courtesy of Mark
Pilgrim (http://diveintopython.org/).
- statemachine.py: A finite state machine specialized for
regular-expression-based text filters.
......@@ -55,12 +49,12 @@ Subpackages:
__docformat__ = 'reStructuredText'
__version__ = '0.2.8'
__version__ = '0.3.0'
"""``major.minor.micro`` version number. The micro number is bumped any time
there's a change in the API incompatible with one of the front ends. The
minor number is bumped whenever there is a project release. The major number
will be bumped when the project is complete, and perhaps if there is a major
change in the design."""
will be bumped when the project is feature-complete, and perhaps if there is a
major change in the design."""
class ApplicationError(StandardError):
    """Base class for errors reported by Docutils applications."""
......@@ -85,7 +79,11 @@ class SettingsSpec:
and/or description may be `None`; no group title implies no group, just a
list of single options. Runtime settings names are derived implicitly
from long option names ("--a-setting" becomes ``settings.a_setting``) or
explicitly from the "destination" keyword argument."""
explicitly from the "dest" keyword argument."""
settings_defaults = None
"""A dictionary of defaults for internal or inaccessible (by command-line
or config file) settings. Override in subclasses."""
settings_default_overrides = None
"""A dictionary of auxiliary defaults, to override defaults for settings
......
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:00 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:30 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -15,9 +15,9 @@ custom component objects first, and pass *them* to
__docformat__ = 'reStructuredText'
import sys
from docutils import Component
from docutils import frontend, io, readers, parsers, writers
from docutils.frontend import OptionParser, ConfigParser
from docutils import Component, __version__
from docutils import frontend, io, utils, readers, parsers, writers
from docutils.frontend import OptionParser
class Publisher:
......@@ -87,14 +87,8 @@ class Publisher:
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
components=(settings_spec, self.parser, self.reader, self.writer),
defaults=defaults, read_config_files=1,
usage=usage, description=description)
config = ConfigParser()
config.read_standard_files()
config_settings = config.get_section('options')
frontend.make_paths_absolute(config_settings,
option_parser.relative_path_settings)
defaults.update(config_settings)
option_parser.set_defaults(**defaults)
return option_parser
def get_settings(self, usage=None, description=None,
......@@ -148,7 +142,8 @@ class Publisher:
self.settings._destination = destination_path
self.destination = self.destination_class(
destination=destination, destination_path=destination_path,
encoding=self.settings.output_encoding)
encoding=self.settings.output_encoding,
error_handler=self.settings.output_encoding_error_handler)
def apply_transforms(self, document):
document.transformer.populate_from_components(
......@@ -157,7 +152,8 @@ class Publisher:
document.transformer.apply_transforms()
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None):
settings_spec=None, settings_overrides=None,
enable_exit=None):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
......@@ -169,25 +165,52 @@ class Publisher:
elif settings_overrides:
self.settings._update(settings_overrides, 'loose')
self.set_io()
document = self.reader.read(self.source, self.parser, self.settings)
self.apply_transforms(document)
output = self.writer.write(document, self.destination)
exit = None
document = None
try:
document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms(document)
output = self.writer.write(document, self.destination)
except utils.SystemMessage, error:
if self.settings.traceback:
raise
print >>sys.stderr, ('Exiting due to level-%s (%s) system message.'
% (error.level,
utils.Reporter.levels[error.level]))
exit = 1
except Exception, error:
if self.settings.traceback:
raise
print >>sys.stderr, error
print >>sys.stderr, ("""\
Exiting due to error. Use "--traceback" to diagnose.
Please report errors to <docutils-users@lists.sf.net>.
Include "--traceback" output, Docutils version (%s),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, sys.version.split()[0]))
exit = 1
if self.settings.dump_settings:
from pprint import pformat
print >>sys.stderr, '\n::: Runtime settings:'
print >>sys.stderr, pformat(self.settings.__dict__)
if self.settings.dump_internals:
if self.settings.dump_internals and document:
from pprint import pformat
print >>sys.stderr, '\n::: Document internals:'
print >>sys.stderr, pformat(document.__dict__)
if self.settings.dump_transforms:
if self.settings.dump_transforms and document:
from pprint import pformat
print >>sys.stderr, '\n::: Transforms applied:'
print >>sys.stderr, pformat(document.transformer.applied)
if self.settings.dump_pseudo_xml:
if self.settings.dump_pseudo_xml and document:
print >>sys.stderr, '\n::: Pseudo-XML:'
print >>sys.stderr, document.pformat().encode(
'raw_unicode_escape')
if enable_exit and document and (document.reporter.max_level
>= self.settings.exit_level):
sys.exit(document.reporter.max_level + 10)
elif exit:
sys.exit(1)
return output
......@@ -199,7 +222,7 @@ def publish_cmdline(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, argv=None,
settings_overrides=None, enable_exit=1, argv=None,
usage=default_usage, description=default_description):
"""
Set up & run a `Publisher`. For command-line front ends.
......@@ -220,6 +243,7 @@ def publish_cmdline(reader=None, reader_name='standalone',
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
- `enable_exit`: Boolean; enable exit status at end of processing?
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
......@@ -228,14 +252,16 @@ def publish_cmdline(reader=None, reader_name='standalone',
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
pub.publish(argv, usage, description, settings_spec, settings_overrides)
pub.publish(argv, usage, description, settings_spec, settings_overrides,
enable_exit=enable_exit)
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None):
settings=None, settings_spec=None, settings_overrides=None,
enable_exit=None):
"""
Set up & run a `Publisher`. For programmatic use with file-like I/O.
......@@ -263,6 +289,7 @@ def publish_file(source=None, source_path=None,
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
- `enable_exit`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
......@@ -272,21 +299,27 @@ def publish_file(source=None, source_path=None,
settings._update(settings_overrides, 'loose')
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
pub.publish()
pub.publish(enable_exit=enable_exit)
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None):
settings_overrides=None, enable_exit=None):
"""
Set up & run a `Publisher`, and return the string output.
For programmatic use with string I/O.
For encoded string output, be sure to set the "output_encoding" setting to
the desired encoding. Set it to "unicode" for unencoded Unicode string
output.
output. Here's how::
publish_string(..., settings_overrides={'output_encoding': 'unicode'})
Similarly for Unicode string input (`source`)::
publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters:
......@@ -312,6 +345,7 @@ def publish_string(source, source_path=None, destination_path=None,
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
- `enable_exit`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings,
source_class=io.StringInput,
......@@ -323,4 +357,4 @@ def publish_string(source, source_path=None, destination_path=None,
settings._update(settings_overrides, 'loose')
pub.set_source(source, source_path)
pub.set_destination(destination_path=destination_path)
return pub.publish()
return pub.publish(enable_exit=enable_exit)
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:00 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:30 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -19,10 +19,13 @@ __docformat__ = 'reStructuredText'
import os
import os.path
import sys
import types
import ConfigParser as CP
import codecs
import docutils
from docutils import optik
from docutils.optik import Values
import optparse
from optparse import Values, SUPPRESS_HELP
def store_multiple(option, opt, value, parser, *args, **kwargs):
......@@ -42,12 +45,85 @@ def read_config_file(option, opt, value, parser):
Read a configuration file during option processing. (Option callback.)
"""
config_parser = ConfigParser()
config_parser.read(value)
config_parser.read(value, parser)
settings = config_parser.get_section('options')
make_paths_absolute(settings, parser.relative_path_settings,
os.path.dirname(value))
parser.values.__dict__.update(settings)
def set_encoding(option, opt, value, parser):
"""
Validate & set the encoding specified. (Option callback.)
"""
try:
value = validate_encoding(option.dest, value)
except LookupError, error:
raise (optparse.OptionValueError('option "%s": %s' % (opt, error)),
None, sys.exc_info()[2])
setattr(parser.values, option.dest, value)
def validate_encoding(name, value):
    """
    Return `value` unchanged if it names a known codec.

    `name` identifies the setting being validated (used by callers for error
    reporting).  Raises LookupError for an unknown encoding.
    """
    try:
        codecs.lookup(value)
    except LookupError:
        # BUG FIX: the original parenthesized a three-expression "raise",
        # which raised a tuple instead of the LookupError and dropped the
        # traceback argument on the floor.
        raise LookupError('unknown encoding: "%s"' % value)
    return value
def set_encoding_error_handler(option, opt, value, parser):
"""
Validate & set the encoding error handler specified. (Option callback.)
"""
try:
value = validate_encoding_error_handler(option.dest, value)
except LookupError, error:
raise (optparse.OptionValueError('option "%s": %s' % (opt, error)),
None, sys.exc_info()[2])
setattr(parser.values, option.dest, value)
def validate_encoding_error_handler(name, value):
    """
    Return `value` unchanged if it names a known codec error handler.

    Raises LookupError for an unknown handler.  Prior to Python 2.3 there is
    no `codecs.lookup_error`, so only the three built-in handlers can be
    checked.
    """
    try:
        codecs.lookup_error(value)
    except AttributeError:              # prior to Python 2.3
        if value not in ('strict', 'ignore', 'replace'):
            # BUG FIX (here and below): the original parenthesized
            # three-expression "raise" statements, which raised tuples
            # instead of the exceptions and dropped the traceback argument.
            raise LookupError(
                'unknown encoding error handler: "%s" (choices: '
                '"strict", "ignore", or "replace")' % value)
    except LookupError:
        raise LookupError(
            'unknown encoding error handler: "%s" (choices: '
            '"strict", "ignore", "replace", "backslashreplace", '
            '"xmlcharrefreplace", and possibly others; see documentation for '
            'the Python ``codecs`` module)' % value)
    return value
def set_encoding_and_error_handler(option, opt, value, parser):
"""
Validate & set the encoding and error handler specified. (Option callback.)
"""
try:
value = validate_encoding_and_error_handler(option.dest, value)
except LookupError, error:
raise (optparse.OptionValueError('option "%s": %s' % (opt, error)),
None, sys.exc_info()[2])
if ':' in value:
encoding, handler = value.split(':')
setattr(parser.values, option.dest + '_error_handler', handler)
else:
encoding = value
setattr(parser.values, option.dest, encoding)
def validate_encoding_and_error_handler(name, value):
    """
    Validate an "<encoding>" or "<encoding>:<error_handler>" value.

    Returns `value` unchanged; raises LookupError (via `validate_encoding` /
    `validate_encoding_error_handler`) for an unknown encoding or handler.
    """
    if ':' in value:
        # BUG FIX: split on the first colon only; the original bare
        # split(':') raised an unhelpful "too many values to unpack"
        # ValueError for input like "utf-8:a:b" instead of letting the
        # handler validator report a proper LookupError.
        encoding, handler = value.split(':', 1)
        validate_encoding_error_handler(name + '_error_handler', handler)
    else:
        encoding = value
    validate_encoding(name, encoding)
    return value
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
......@@ -63,7 +139,7 @@ def make_paths_absolute(pathdict, keys, base_path=None):
os.path.abspath(os.path.join(base_path, pathdict[key])))
class OptionParser(optik.OptionParser, docutils.SettingsSpec):
class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
"""
Parser for command-line and library use. The `settings_spec`
......@@ -81,6 +157,11 @@ class OptionParser(optik.OptionParser, docutils.SettingsSpec):
thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
"""Lookup table for --report and --halt threshold values."""
if hasattr(codecs, 'backslashreplace_errors'):
default_error_encoding_error_handler = 'backslashreplace'
else:
default_error_encoding_error_handler = 'replace'
settings_spec = (
'General Docutils Options',
None,
......@@ -147,17 +228,54 @@ class OptionParser(optik.OptionParser, docutils.SettingsSpec):
('Same as "--halt=info": halt processing at the slightest problem.',
['--strict'], {'action': 'store_const', 'const': 'info',
'dest': 'halt_level'}),
('Report debug-level system messages.',
('Enable a non-zero exit status for normal exit if non-halting '
'system messages (at or above <level>) were generated. Levels as '
'in --report. Default is 5 (disabled). Exit status is the maximum '
'system message level plus 10 (11 for INFO, etc.).',
['--exit'], {'choices': threshold_choices, 'dest': 'exit_level',
'default': 5, 'metavar': '<level>'}),
('Report debug-level system messages and generate diagnostic output.',
['--debug'], {'action': 'store_true'}),
('Do not report debug-level system messages.',
('Do not report debug-level system messages or generate diagnostic '
'output.',
['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
('Send the output of system messages (warnings) to <file>.',
['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
('Enable Python tracebacks when an error occurs.',
['--traceback'], {'action': 'store_true', 'default': None}),
('Disable Python tracebacks when errors occur; report just the error '
'instead. This is the default.',
['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
('Specify the encoding of input text. Default is locale-dependent.',
['--input-encoding', '-i'], {'metavar': '<name>'}),
('Specify the encoding for output. Default is UTF-8.',
['--input-encoding', '-i'],
{'action': 'callback', 'callback': set_encoding,
'metavar': '<name>', 'type': 'string', 'dest': 'input_encoding'}),
('Specify the text encoding for output. Default is UTF-8. '
'Optionally also specify the encoding error handler for unencodable '
'characters (see "--error-encoding"); default is "strict".',
['--output-encoding', '-o'],
{'metavar': '<name>', 'default': 'utf-8'}),
{'action': 'callback', 'callback': set_encoding_and_error_handler,
'metavar': '<name[:handler]>', 'type': 'string',
'dest': 'output_encoding', 'default': 'utf-8'}),
(SUPPRESS_HELP, # usually handled by --output-encoding
['--output_encoding_error_handler'],
{'action': 'callback', 'callback': set_encoding_error_handler,
'type': 'string', 'dest': 'output_encoding_error_handler',
'default': 'strict'}),
('Specify the text encoding for error output. Default is ASCII. '
'Optionally also specify the encoding error handler for unencodable '
'characters, after a colon (":"). Acceptable values are the same '
'as for the "error" parameter of Python\'s ``encode`` string '
'method. Default is "%s".' % default_error_encoding_error_handler,
['--error-encoding', '-e'],
{'action': 'callback', 'callback': set_encoding_and_error_handler,
'metavar': '<name[:handler]>', 'type': 'string',
'dest': 'error_encoding', 'default': 'ascii'}),
(SUPPRESS_HELP, # usually handled by --error-encoding
['--error_encoding_error_handler'],
{'action': 'callback', 'callback': set_encoding_error_handler,
'type': 'string', 'dest': 'error_encoding_error_handler',
'default': default_error_encoding_error_handler}),
('Specify the language of input text (ISO 639 2-letter identifier).'
' Default is "en" (English).',
['--language', '-l'], {'dest': 'language_code', 'default': 'en',
......@@ -170,53 +288,60 @@ class OptionParser(optik.OptionParser, docutils.SettingsSpec):
('Show this help message and exit.',
['--help', '-h'], {'action': 'help'}),
# Hidden options, for development use only:
(optik.SUPPRESS_HELP,
['--dump-settings'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--dump-internals'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--dump-transforms'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--dump-pseudo-xml'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--expose-internal-attribute'],
(SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--expose-internal-attribute'],
{'action': 'append', 'dest': 'expose_internals'}),))
"""Runtime settings and command-line options common to all Docutils front
ends. Setting specs specific to individual Docutils components are also
used (see `populate_from_components()`)."""
settings_defaults = {'_disable_config': None}
"""Defaults for settings that don't have command-line option equivalents."""
relative_path_settings = ('warning_stream',)
version_template = '%%prog (Docutils %s)' % docutils.__version__
"""Default version message."""
def __init__(self, components=(), *args, **kwargs):
def __init__(self, components=(), defaults=None, read_config_files=None,
*args, **kwargs):
"""
`components` is a list of Docutils components each containing a
``.settings_spec`` attribute. `defaults` is a mapping of setting
default overrides.
"""
optik.OptionParser.__init__(
self, help=None,
format=optik.Titled(),
# Needed when Optik is updated (replaces above 2 lines):
#self, add_help=None,
#formatter=optik.TitledHelpFormatter(width=78),
optparse.OptionParser.__init__(
self, add_help_option=None,
formatter=optparse.TitledHelpFormatter(width=78),
*args, **kwargs)
if not self.version:
self.version = self.version_template
# Internal settings with no defaults from settings specifications;
# initialize manually:
self.set_defaults(_source=None, _destination=None)
# Make an instance copy (it will be modified):
self.relative_path_settings = list(self.relative_path_settings)
self.populate_from_components(tuple(components) + (self,))
self.populate_from_components((self,) + tuple(components))
defaults = defaults or {}
if read_config_files and not self.defaults['_disable_config']:
# @@@ Extract this code into a method, which can be called from
# the read_config_file callback also.
config = ConfigParser()
config.read_standard_files(self)
config_settings = config.get_section('options')
make_paths_absolute(config_settings, self.relative_path_settings)
defaults.update(config_settings)
# Internal settings with no defaults from settings specifications;
# initialize manually:
self.set_defaults(_source=None, _destination=None, **defaults)
def populate_from_components(self, components):
"""
For each component, first populate from the `SettingsSpec.settings_spec`
structure, then from the `SettingsSpec.settings_defaults` dictionary.
After all components have been processed, check for and populate from
each component's `SettingsSpec.settings_default_overrides` dictionary.
"""
for component in components:
if component is None:
continue
......@@ -227,13 +352,15 @@ class OptionParser(optik.OptionParser, docutils.SettingsSpec):
while i < len(settings_spec):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optik.OptionGroup(self, title, description)
group = optparse.OptionGroup(self, title, description)
self.add_option_group(group)
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
group.add_option(help=help_text, *option_strings,
**kwargs)
if component.settings_defaults:
self.defaults.update(component.settings_defaults)
i += 3
for component in components:
if component and component.settings_default_overrides:
......@@ -244,6 +371,8 @@ class OptionParser(optik.OptionParser, docutils.SettingsSpec):
values.report_level = self.check_threshold(values.report_level)
if hasattr(values, 'halt_level'):
values.halt_level = self.check_threshold(values.halt_level)
if hasattr(values, 'exit_level'):
values.exit_level = self.check_threshold(values.exit_level)
values._source, values._destination = self.check_args(args)
make_paths_absolute(values.__dict__, self.relative_path_settings,
os.getcwd())
......@@ -262,8 +391,12 @@ class OptionParser(optik.OptionParser, docutils.SettingsSpec):
source = destination = None
if args:
source = args.pop(0)
if source == '-': # means stdin
source = None
if args:
destination = args.pop(0)
if destination == '-': # means stdout
destination = None
if args:
self.error('Maximum 2 arguments allowed.')
if source and source == destination:
......@@ -281,8 +414,44 @@ class ConfigParser(CP.ConfigParser):
"""Docutils configuration files, using ConfigParser syntax (section
'options'). Later files override earlier ones."""
def read_standard_files(self):
self.read(self.standard_config_files)
validation = {
'options':
{'input_encoding': validate_encoding,
'output_encoding': validate_encoding,
'output_encoding_error_handler': validate_encoding_error_handler,
'error_encoding': validate_encoding,
'error_encoding_error_handler': validate_encoding_error_handler}}
"""{section: {option: validation function}} mapping, used by
`validate_options`. Validation functions take two parameters: name and
value. They return a (possibly modified) value, or raise an exception."""
def read_standard_files(self, option_parser):
    """Read all standard Docutils configuration files
    (`self.standard_config_files`), in order; later files override earlier
    ones.  `option_parser` is passed through for setting validation."""
    self.read(self.standard_config_files, option_parser)
def read(self, filenames, option_parser):
    """
    Read & parse `filenames` (a path string or a sequence of them),
    validating the recognized settings of each file as it is read.
    """
    # BUG FIX: was ``type(filenames) in types.StringTypes``, which fails for
    # str/unicode subclasses; ``types.StringTypes`` is a tuple, so it works
    # directly with isinstance().
    if isinstance(filenames, types.StringTypes):
        filenames = [filenames]
    for filename in filenames:
        CP.ConfigParser.read(self, filename)
        # Validate immediately so errors are reported per-file.
        self.validate_options(filename, option_parser)
def validate_options(self, filename, option_parser):
for section in self.validation.keys():
if not self.has_section(section):
continue
for option in self.validation[section].keys():
if self.has_option(section, option):
value = self.get(section, option)
validator = self.validation[section][option]
try:
new_value = validator(option, value)
except Exception, error:
raise (ValueError(
'Error in config file "%s", section "[%s]":\n'
' %s: %s\n %s = %s'
% (filename, section, error.__class__.__name__,
error, option, value)), None, sys.exc_info()[2])
self.set(section, option, new_value)
def optionxform(self, optionstr):
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:00 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:30 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -13,6 +13,7 @@ __docformat__ = 'reStructuredText'
import sys
import locale
from types import UnicodeType
from docutils import TransformSpec
......@@ -26,20 +27,9 @@ class Input(TransformSpec):
default_source_path = None
def __init__(self, settings=None, source=None, source_path=None,
encoding=None):
def __init__(self, source=None, source_path=None, encoding=None):
self.encoding = encoding
"""The character encoding for the input source."""
if settings:
if not encoding:
self.encoding = settings.input_encoding
import warnings, traceback
warnings.warn(
'Setting input encoding via a "settings" struct is '
'deprecated; send encoding directly instead.\n%s'
% ''.join(traceback.format_list(traceback.extract_stack()
[-3:-1])))
"""Text encoding for the input source."""
self.source = source
"""The source of input data."""
......@@ -67,7 +57,8 @@ class Input(TransformSpec):
locale.setlocale(locale.LC_ALL, '')
"""
if self.encoding and self.encoding.lower() == 'unicode':
if (self.encoding and self.encoding.lower() == 'unicode'
or isinstance(data, UnicodeType)):
return unicode(data)
encodings = [self.encoding, 'utf-8']
try:
......@@ -87,8 +78,7 @@ class Input(TransformSpec):
if not enc:
continue
try:
decoded = unicode(data, enc)
return decoded
return unicode(data, enc)
except (UnicodeError, LookupError):
pass
raise UnicodeError(
......@@ -106,20 +96,13 @@ class Output(TransformSpec):
default_destination_path = None
def __init__(self, settings=None, destination=None, destination_path=None,
encoding=None):
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict'):
self.encoding = encoding
"""The character encoding for the output destination."""
if settings:
if not encoding:
self.encoding = settings.output_encoding
import warnings, traceback
warnings.warn(
'Setting output encoding via a "settings" struct is '
'deprecated; send encoding directly instead.\n%s'
% ''.join(traceback.format_list(traceback.extract_stack()
[-3:-1])))
"""Text encoding for the output destination."""
self.error_handler = error_handler or 'strict'
"""Text encoding error handler."""
self.destination = destination
"""The destination for output data."""
......@@ -141,7 +124,7 @@ class Output(TransformSpec):
if self.encoding and self.encoding.lower() == 'unicode':
return data
else:
return data.encode(self.encoding or '')
return data.encode(self.encoding, self.error_handler)
class FileInput(Input):
......@@ -150,8 +133,8 @@ class FileInput(Input):
Input for single, simple file-like objects.
"""
def __init__(self, settings=None, source=None, source_path=None,
encoding=None, autoclose=1):
def __init__(self, source=None, source_path=None,
encoding=None, autoclose=1, handle_io_errors=1):
"""
:Parameters:
- `source`: either a file-like object (which is read directly), or
......@@ -160,11 +143,22 @@ class FileInput(Input):
- `autoclose`: close automatically after read (boolean); always
false if `sys.stdin` is the source.
"""
Input.__init__(self, settings, source, source_path, encoding)
Input.__init__(self, source, source_path, encoding)
self.autoclose = autoclose
self.handle_io_errors = handle_io_errors
if source is None:
if source_path:
self.source = open(source_path)
try:
self.source = open(source_path)
except IOError, error:
if not handle_io_errors:
raise
print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
error)
print >>sys.stderr, (
'Unable to open source file for reading (%s). Exiting.'
% source_path)
sys.exit(1)
else:
self.source = sys.stdin
self.autoclose = None
......@@ -191,8 +185,9 @@ class FileOutput(Output):
Output for single, simple file-like objects.
"""
def __init__(self, settings=None, destination=None, destination_path=None,
encoding=None, autoclose=1):
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict', autoclose=1,
handle_io_errors=1):
"""
:Parameters:
- `destination`: either a file-like object (which is written
......@@ -203,10 +198,11 @@ class FileOutput(Output):
- `autoclose`: close automatically after write (boolean); always
false if `sys.stdout` is the destination.
"""
Output.__init__(self, settings, destination, destination_path,
encoding)
Output.__init__(self, destination, destination_path,
encoding, error_handler)
self.opened = 1
self.autoclose = autoclose
self.handle_io_errors = handle_io_errors
if destination is None:
if destination_path:
self.opened = None
......@@ -220,7 +216,16 @@ class FileOutput(Output):
pass
def open(self):
self.destination = open(self.destination_path, 'w')
try:
self.destination = open(self.destination_path, 'w')
except IOError, error:
if not self.handle_io_errors:
raise
print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
error)
print >>sys.stderr, ('Unable to open destination file for writing '
'(%s). Exiting.' % source_path)
sys.exit(1)
self.opened = 1
def write(self, data):
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:02 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# Internationalization details are documented in
# <http://docutils.sf.net/spec/howto/i18n.html>.
"""
This package contains modules for language-dependent features of Docutils.
"""
......
# Author: Jannie Hofmeyr
# Contact: jhsh@sun.ac.za
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Afrikaans-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'


labels = {
    # Bibliographic-field labels:
    'author': 'Auteur',
    'authors': 'Auteurs',
    'organization': 'Organisasie',
    'address': 'Adres',
    'contact': 'Kontak',
    'version': 'Weergawe',
    'revision': 'Revisie',
    'status': 'Status',
    'date': 'Datum',
    'copyright': 'Kopiereg',
    'dedication': 'Opdrag',
    'abstract': 'Opsomming',
    # Admonition & topic titles:
    'attention': 'Aandag!',
    'caution': 'Wees versigtig!',
    'danger': '!GEVAAR!',
    'error': 'Fout',
    'hint': 'Wenk',
    'important': 'Belangrik',
    'note': 'Nota',
    'tip': 'Tip', # hint and tip both have the same translation: wenk
    'warning': 'Waarskuwing',
    'contents': 'Inhoud'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
    'auteur': 'author',
    'auteurs': 'authors',
    'organisasie': 'organization',
    'adres': 'address',
    'kontak': 'contact',
    'weergawe': 'version',
    'revisie': 'revision',
    'status': 'status',
    'datum': 'date',
    'kopiereg': 'copyright',
    'opdrag': 'dedication',
    'opsomming': 'abstract'}
"""Afrikaans (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
# Authors: David Goodger; Gunnar Schwant
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:03 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': 'Autor',
'authors': 'Autoren',
......@@ -39,20 +41,19 @@ labels = {
"""Mapping of node class name to label text."""
bibliographic_fields = {
'autor': nodes.author,
'autoren': nodes.authors,
'organisation': nodes.organization,
'adresse': nodes.address,
'kontakt': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'datum': nodes.date,
'copyright': nodes.copyright,
'widmung': nodes.topic,
'zusammenfassung': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
'autor': 'author',
'autoren': 'authors',
'organisation': 'organization',
'adresse': 'address',
'kontakt': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'widmung': 'dedication',
'zusammenfassung': 'abstract'}
"""German (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:03 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
English-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
# fixed: language-dependent
'author': 'Author',
'authors': 'Authors',
'organization': 'Organization',
......@@ -40,20 +42,20 @@ labels = {
"""Mapping of node class name to label text."""
bibliographic_fields = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
# language-dependent: fixed
'author': 'author',
'authors': 'authors',
'organization': 'organization',
'address': 'address',
'contact': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'date': 'date',
'copyright': 'copyright',
'dedication': 'dedication',
'abstract': 'abstract'}
"""English (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
......
# Author: Marcelo Huerta San Martn
# Contact: mghsm@uol.com.ar
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Spanish-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': u'Autor',
'authors': u'Autores',
'organization': u'Organizaci\u00f3n',
'address': u'Direcci\u00f3n',
'contact': u'Contacto',
'version': u'Versi\u00f3n',
'revision': u'Revisi\u00f3n',
'status': u'Estado',
'date': u'Fecha',
'copyright': u'Copyright',
'dedication': u'Dedicatoria',
'abstract': u'Resumen',
'attention': u'\u00a1Atenci\u00f3n!',
'caution': u'\u00a1Precauci\u00f3n!',
'danger': u'\u00a1PELIGRO!',
'error': u'Error',
'hint': u'Sugerencia',
'important': u'Importante',
'note': u'Nota',
'tip': u'Consejo',
'warning': u'Advertencia',
'contents': u'Contenido'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'autor': 'author',
u'autores': 'authors',
u'organizaci\u00f3n': 'organization',
u'direcci\u00f3n': 'address',
u'contacto': 'contact',
u'versi\u00f3n': 'version',
u'revisi\u00f3n': 'revision',
u'estado': 'status',
u'fecha': 'date',
u'copyright': 'copyright',
u'dedicatoria': 'dedication',
u'resumen': 'abstract'}
"""Spanish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
# Author: Stefane Fermigier
# Contact: sf@fermigier.com
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:03 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
French-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': 'Auteur',
'authors': 'Auteurs',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Contact',
'version': 'Version',
'revision': 'R\u00e9vision',
'status': 'Statut',
'date': 'Date',
'copyright': 'Copyright',
'dedication': 'D\u00e9dicace',
'abstract': 'R\u00e9sum\u00e9',
'attention': 'Attention!',
'caution': 'Avertissement!',
'danger': '!DANGER!',
'error': 'Erreur',
'hint': 'Indication',
'important': 'Important',
'note': 'Note',
'tip': 'Astuce',
'warning': 'Avertissement',
'contents': 'Contenu'}
u'author': u'Auteur',
u'authors': u'Auteurs',
u'organization': u'Organisation',
u'address': u'Adresse',
u'contact': u'Contact',
u'version': u'Version',
u'revision': u'R\u00e9vision',
u'status': u'Statut',
u'date': u'Date',
u'copyright': u'Copyright',
u'dedication': u'D\u00e9dicace',
u'abstract': u'R\u00e9sum\u00e9',
u'attention': u'Attention!',
u'caution': u'Avertissement!',
u'danger': u'!DANGER!',
u'error': u'Erreur',
u'hint': u'Indication',
u'important': u'Important',
u'note': u'Note',
u'tip': u'Astuce',
u'warning': u'Avis',
u'contents': u'Sommaire'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'auteur': nodes.author,
'auteurs': nodes.authors,
'organisation': nodes.organization,
'adresse': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'r\u00e9vision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'd\u00e9dicace': nodes.topic,
'r\u00e9sum\u00e9': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
u'auteur': u'author',
u'auteurs': u'authors',
u'organisation': u'organization',
u'adresse': u'address',
u'contact': u'contact',
u'version': u'version',
u'r\u00e9vision': u'revision',
u'statut': u'status',
u'date': u'date',
u'copyright': u'copyright',
u'd\u00e9dicace': u'dedication',
u'r\u00e9sum\u00e9': u'abstract'}
"""French (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
......
# Author: Nicola Larosa
# Contact: docutils@tekNico.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:03 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Italian-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': 'Autore',
'authors': 'Autori',
......@@ -40,20 +41,19 @@ labels = {
"""Mapping of node class name to label text."""
bibliographic_fields = {
'autore': nodes.author,
'autori': nodes.authors,
'organizzazione': nodes.organization,
'indirizzo': nodes.address,
'contatti': nodes.contact,
'versione': nodes.version,
'revisione': nodes.revision,
'status': nodes.status,
'data': nodes.date,
'copyright': nodes.copyright,
'dedica': nodes.topic,
'riassunto': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
'autore': 'author',
'autori': 'authors',
'organizzazione': 'organization',
'indirizzo': 'address',
'contatti': 'contact',
'versione': 'version',
'revisione': 'revision',
'status': 'status',
'data': 'date',
'copyright': 'copyright',
'dedica': 'dedication',
'riassunto': 'abstract'}
"""Italian (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
......
# Author: Roman Suzi
# Contact: rnd@onego.ru
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
u'abstract': u'\u0410\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f',
u'address': u'\u0410\u0434\u0440\u0435\u0441',
u'attention': u'\u0412\u043d\u0438\u043c\u0430\u043d\u0438\u0435!',
u'author': u'\u0410\u0432\u0442\u043e\u0440',
u'authors': u'\u0410\u0432\u0442\u043e\u0440\u044b',
u'caution': u'\u041e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e!',
u'contact': u'\u041a\u043e\u043d\u0442\u0430\u043a\u0442',
u'contents':
u'\u0421\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435',
u'copyright': u'\u041f\u0440\u0430\u0432\u0430 '
u'\u043a\u043e\u043f\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f',
u'danger': u'\u041e\u041f\u0410\u0421\u041d\u041e!',
u'date': u'\u0414\u0430\u0442\u0430',
u'dedication':
u'\u041f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435',
u'error': u'\u041e\u0448\u0438\u0431\u043a\u0430',
u'hint': u'\u0421\u043e\u0432\u0435\u0442',
u'important': u'\u0412\u0430\u0436\u043d\u043e',
u'note': u'\u041f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435',
u'organization':
u'\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f',
u'revision': u'\u0420\u0435\u0434\u0430\u043a\u0446\u0438\u044f',
u'status': u'\u0421\u0442\u0430\u0442\u0443\u0441',
u'tip': u'\u041f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430',
u'version': u'\u0412\u0435\u0440\u0441\u0438\u044f',
u'warning': u'\u041f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436'
u'\u0434\u0435\u043d\u0438\u0435'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'\u0410\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f': u'abstract',
u'\u0410\u0434\u0440\u0435\u0441': u'address',
u'\u0410\u0432\u0442\u043e\u0440': u'author',
u'\u0410\u0432\u0442\u043e\u0440\u044b': u'authors',
u'\u041a\u043e\u043d\u0442\u0430\u043a\u0442': u'contact',
u'\u041f\u0440\u0430\u0432\u0430 \u043a\u043e\u043f\u0438\u0440\u043e'
u'\u0432\u0430\u043d\u0438\u044f': u'copyright',
u'\u0414\u0430\u0442\u0430': u'date',
u'\u041f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435':
u'dedication',
u'\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f':
u'organization',
u'\u0420\u0435\u0434\u0430\u043a\u0446\u0438\u044f': u'revision',
u'\u0421\u0442\u0430\u0442\u0443\u0441': u'status',
u'\u0412\u0435\u0440\u0441\u0438\u044f': u'version'}
"""Russian (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
# :Author: Miroslav Vasko
# :Contact: zemiak@zoznam.sk
# :Revision: $Revision: 1.2 $
# :Date: $Date: 2003/02/01 09:26:03 $
# :Revision: $Revision: 1.3 $
# :Date: $Date: 2003/07/10 15:49:34 $
# :Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Slovak-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': u'Autor',
'authors': u'Autori',
......@@ -40,20 +41,19 @@ labels = {
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'autor': nodes.author,
u'autori': nodes.authors,
u'organiz\u00E1cia': nodes.organization,
u'adresa': nodes.address,
u'kontakt': nodes.contact,
u'verzia': nodes.version,
u'rev\u00EDzia': nodes.revision,
u'stav': nodes.status,
u'D\u00E1tum': nodes.date,
u'copyright': nodes.copyright,
u'venovanie': nodes.topic,
u'abstraktne': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
u'autor': 'author',
u'autori': 'authors',
u'organiz\u00E1cia': 'organization',
u'adresa': 'address',
u'kontakt': 'contact',
u'verzia': 'version',
u'rev\u00EDzia': 'revision',
u'stav': 'status',
u'd\u00E1tum': 'date',
u'copyright': 'copyright',
u'venovanie': 'dedication',
u'abstraktne': 'abstract'}
"""Slovak (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
......
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:03 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': u'F\u00f6rfattare',
'authors': u'F\u00f6rfattare',
......@@ -41,19 +42,19 @@ labels = {
bibliographic_fields = {
# 'Author' and 'Authors' identical in Swedish; assume the plural:
u'f\u00f6rfattare': nodes.authors,
u'organisation': nodes.organization,
u'adress': nodes.address,
u'kontakt': nodes.contact,
u'version': nodes.version,
u'revision': nodes.revision,
u'status': nodes.status,
u'datum': nodes.date,
u'copyright': nodes.copyright,
u'dedikation': nodes.topic,
u'sammanfattning': nodes.topic }
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
u'f\u00f6rfattare': 'authors',
u' n/a': 'author',
u'organisation': 'organization',
u'adress': 'address',
u'kontakt': 'contact',
u'version': 'version',
u'revision': 'revision',
u'status': 'status',
u'datum': 'date',
u'copyright': 'copyright',
u'dedikation': 'dedication',
u'sammanfattning': 'abstract' }
"""Swedish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:00 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:30 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -53,7 +53,14 @@ class Node:
"""The line number (1-based) of the beginning of this Node in `source`."""
def __nonzero__(self):
"""Node instances are always true."""
"""
Node instances are always true, even if they're empty. A node is more
than a simple container. Its boolean "truth" does not depend on
having one or more subnodes in the doctree.
Use `len()` to check node length. Use `None` to represent a boolean
false value.
"""
return 1
def asdom(self, dom=xml.dom.minidom):
......@@ -175,6 +182,9 @@ class Text(Node, UserString):
data = repr(self.data[:64] + ' ...')
return '<%s: %s>' % (self.tagname, data)
def __len__(self):
return len(self.data)
def shortrepr(self):
data = repr(self.data)
if len(data) > 20:
......@@ -261,9 +271,9 @@ class Element(Node):
def _dom_node(self, domroot):
element = domroot.createElement(self.tagname)
for attribute, value in self.attributes.items():
if type(value) is ListType:
value = ' '.join(value)
element.setAttribute(attribute, str(value))
if isinstance(value, ListType):
value = ' '.join(['%s' % v for v in value])
element.setAttribute(attribute, '%s' % value)
for child in self.children:
element.appendChild(child._dom_node(domroot))
return element
......@@ -289,10 +299,13 @@ class Element(Node):
return '<%s...>' % self.tagname
def __str__(self):
return unicode(self).encode('raw_unicode_escape')
def __unicode__(self):
if self.children:
return '%s%s%s' % (self.starttag(),
''.join([str(c) for c in self.children]),
self.endtag())
return u'%s%s%s' % (self.starttag(),
''.join([str(c) for c in self.children]),
self.endtag())
else:
return self.emptytag()
......@@ -302,19 +315,19 @@ class Element(Node):
if value is None: # boolean attribute
parts.append(name)
elif isinstance(value, ListType):
values = [str(v) for v in value]
values = ['%s' % v for v in value]
parts.append('%s="%s"' % (name, ' '.join(values)))
else:
parts.append('%s="%s"' % (name, str(value)))
parts.append('%s="%s"' % (name, value))
return '<%s>' % ' '.join(parts)
def endtag(self):
return '</%s>' % self.tagname
def emptytag(self):
return '<%s/>' % ' '.join([self.tagname] +
['%s="%s"' % (n, v)
for n, v in self.attlist()])
return u'<%s/>' % ' '.join([self.tagname] +
['%s="%s"' % (n, v)
for n, v in self.attlist()])
def __len__(self):
return len(self.children)
......@@ -619,6 +632,10 @@ class document(Root, Structural, Element):
self.substitution_defs = {}
"""Mapping of substitution names to substitution_definition nodes."""
self.substitution_names = {}
"""Mapping of case-normalized substitution names to case-sensitive
names."""
self.refnames = {}
"""Mapping of names to lists of referencing nodes."""
......@@ -864,8 +881,8 @@ class document(Root, Structural, Element):
self.citation_refs.setdefault(ref['refname'], []).append(ref)
self.note_refname(ref)
def note_substitution_def(self, subdef, msgnode=None):
name = subdef['name']
def note_substitution_def(self, subdef, def_name, msgnode=None):
name = subdef['name'] = whitespace_normalize_name(def_name)
if self.substitution_defs.has_key(name):
msg = self.reporter.error(
'Duplicate substitution definition name: "%s".' % name,
......@@ -874,12 +891,14 @@ class document(Root, Structural, Element):
msgnode += msg
oldnode = self.substitution_defs[name]
dupname(oldnode)
# keep only the last definition
# keep only the last definition:
self.substitution_defs[name] = subdef
# case-insensitive mapping:
self.substitution_names[fully_normalize_name(name)] = name
def note_substitution_ref(self, subref):
self.substitution_refs.setdefault(
subref['refname'], []).append(subref)
def note_substitution_ref(self, subref, refname):
name = subref['refname'] = whitespace_normalize_name(refname)
self.substitution_refs.setdefault(name, []).append(subref)
def note_pending(self, pending, priority=None):
self.transformer.add_pending(pending, priority)
......@@ -908,6 +927,7 @@ class document(Root, Structural, Element):
class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass
# ========================
......@@ -947,13 +967,30 @@ class topic(Structural, Element):
"""
Topics are terminal, "leaf" mini-sections, like block quotes with titles,
or textual figures. A topic is just like a section, except that it has no
or textual figures. A topic is just like a section, except that it has no
subsections, and it doesn't have to conform to section placement rules.
Topics are allowed wherever body elements (list, table, etc.) are allowed,
but only at the top level of a section or document. Topics cannot nest
inside topics or body elements; you can't have a topic inside a table,
list, block quote, etc.
but only at the top level of a section or document. Topics cannot nest
inside topics, sidebars, or body elements; you can't have a topic inside a
table, list, block quote, etc.
"""
class sidebar(Structural, Element):
"""
Sidebars are like miniature, parallel documents that occur inside other
documents, providing related or reference material. A sidebar is
typically offset by a border and "floats" to the side of the page; the
document's main text may flow around it. Sidebars can also be likened to
super-footnotes; their content is outside of the flow of the document's
main text.
Sidebars are allowed wherever body elements (list, table, etc.) are
allowed, but only at the top level of a section or document. Sidebars
cannot nest inside sidebars, topics, or body elements; you can't have a
sidebar inside a table, list, block quote, etc.
"""
......@@ -1009,6 +1046,7 @@ class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, FixedTextElement): pass
class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
......@@ -1018,6 +1056,7 @@ class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, PreBibliographic, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
......@@ -1050,8 +1089,8 @@ class system_message(Special, PreBibliographic, Element, BackLinkable):
def astext(self):
line = self.get('line', '')
return '%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
self['level'], Element.astext(self))
return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
self['level'], Element.astext(self))
class pending(Special, Invisible, PreBibliographic, Element):
......@@ -1106,7 +1145,7 @@ class pending(Special, Invisible, PreBibliographic, Element):
internals.append('%7s%s:' % ('', key))
internals.extend(['%9s%s' % ('', line)
for line in value.pformat().splitlines()])
elif value and type(value) == ListType \
elif value and isinstance(value, ListType) \
and isinstance(value[0], Node):
internals.append('%7s%s:' % ('', key))
for v in value:
......@@ -1146,6 +1185,8 @@ class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass
class image(General, Inline, TextElement):
......@@ -1154,6 +1195,7 @@ class image(General, Inline, TextElement):
return self.get('alt', '')
class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass
......@@ -1164,7 +1206,8 @@ class generated(Inline, TextElement): pass
node_class_names = """
Text
abbreviation acronym address attention author authors
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
contact copyright
......@@ -1175,15 +1218,15 @@ node_class_names = """
footnote footnote_reference
generated
header hint
image important
image important inline
label legend line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row
section status strong substitution_definition substitution_reference
subtitle system_message
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
......@@ -1248,11 +1291,14 @@ class SparseNodeVisitor(NodeVisitor):
subclasses), subclass `NodeVisitor` instead.
"""
# Save typing with dynamic definitions.
for name in node_class_names:
exec """def visit_%s(self, node): pass\n""" % name
exec """def depart_%s(self, node): pass\n""" % name
del name
def _nop(self, node):
pass
# Save typing with dynamic assignments:
for _name in node_class_names:
setattr(SparseNodeVisitor, "visit_" + _name, _nop)
setattr(SparseNodeVisitor, "depart_" + _name, _nop)
del _name, _nop
class GenericNodeVisitor(NodeVisitor):
......@@ -1281,13 +1327,17 @@ class GenericNodeVisitor(NodeVisitor):
"""Override for generic, uniform traversals."""
raise NotImplementedError
# Save typing with dynamic definitions.
for name in node_class_names:
exec """def visit_%s(self, node):
self.default_visit(node)\n""" % name
exec """def depart_%s(self, node):
self.default_departure(node)\n""" % name
del name
def _call_default_visit(self, node):
self.default_visit(node)
def _call_default_departure(self, node):
self.default_departure(node)
# Save typing with dynamic assignments:
for _name in node_class_names:
setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
del _name, _call_default_visit, _call_default_departure
class TreeCopyVisitor(GenericNodeVisitor):
......@@ -1385,9 +1435,9 @@ def make_id(string):
Convert `string` into an identifier and return it.
Docutils identifiers will conform to the regular expression
``[a-z][-a-z0-9]*``. For CSS compatibility, identifiers (the "class" and
"id" attributes) should have no underscores, colons, or periods. Hyphens
may be used.
``[a-z](-?[a-z0-9]+)*``. For CSS compatibility, identifiers (the "class"
and "id" attributes) should have no underscores, colons, or periods.
Hyphens may be used.
- The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:
......@@ -1410,7 +1460,7 @@ def make_id(string):
these characters. They should be replaced with hyphens ("-"). Combined
with HTML's requirements (the first character must be a letter; no
"unicode", "latin1", or "escape" characters), this results in the
``[a-z][-a-z0-9]*`` pattern.
``[a-z](-?[a-z0-9]+)*`` pattern.
.. _HTML 4.01 spec: http://www.w3.org/TR/html401
.. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
......@@ -1425,3 +1475,11 @@ _non_id_at_ends = re.compile('^[-0-9]+|-+$')
def dupname(node):
    """Rename the node's 'name' attribute to 'dupname', marking the node as
    carrying a duplicate name."""
    name = node['name']
    del node['name']
    node['dupname'] = name
def fully_normalize_name(name):
    """Return `name` lowercased, with runs of whitespace collapsed to single
    spaces and surrounding whitespace removed."""
    words = name.lower().split()
    return ' '.join(words)
def whitespace_normalize_name(name):
    """Return `name` with internal whitespace runs collapsed to single
    spaces and surrounding whitespace removed; case is preserved."""
    parts = name.split()
    return ' '.join(parts)
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:05 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:39 $
# Copyright: This module has been placed in the public domain.
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:07 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:40 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -95,7 +95,10 @@ class Parser(docutils.parsers.Parser):
{'action': 'store_true'}),
('Set number of spaces for tab expansion (default 8).',
['--tab-width'],
{'metavar': '<width>', 'type': 'int', 'default': 8}),))
{'metavar': '<width>', 'type': 'int', 'default': 8}),
('Remove spaces before footnote references.',
['--trim-footnote-reference-space'],
{'action': 'store_true'}),))
def __init__(self, rfc2822=None, inliner=None):
if rfc2822:
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -60,7 +60,8 @@ directive function):
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse.
options to parse. Several directive option conversion functions are defined
in this module.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
......@@ -92,9 +93,15 @@ _directive_registry = {
'tip': ('admonitions', 'tip'),
'hint': ('admonitions', 'hint'),
'warning': ('admonitions', 'warning'),
'admonition': ('admonitions', 'admonition'),
'sidebar': ('body', 'sidebar'),
'topic': ('body', 'topic'),
'line-block': ('body', 'line_block'),
'parsed-literal': ('body', 'parsed_literal'),
'rubric': ('body', 'rubric'),
'epigraph': ('body', 'epigraph'),
'highlights': ('body', 'highlights'),
'pull-quote': ('body', 'pull_quote'),
#'questions': ('body', 'question_list'),
'image': ('images', 'image'),
'figure': ('images', 'figure'),
......@@ -108,6 +115,8 @@ _directive_registry = {
'raw': ('misc', 'raw'),
'include': ('misc', 'include'),
'replace': ('misc', 'replace'),
'unicode': ('misc', 'unicode_directive'),
'class': ('misc', 'class_directive'),
'restructuredtext-test-directive': ('misc', 'directive_test_function'),}
"""Mapping of directive name to (module name, function name). The directive
name is canonical & must be lowercase. Language-dependent names are defined
......@@ -171,9 +180,14 @@ def directive(directive_name, language_module, document):
return None, messages
return function, messages
def register_directive(name, directive):
    """Add `directive`, an application-defined directive function, to the
    module-level ``_directives`` registry under `name`."""
    _directives[name] = directive
def flag(argument):
"""
Check for a valid flag option (no argument) and return ``None``.
(Directive option conversion function.)
Raise ``ValueError`` if an argument is found.
"""
......@@ -182,9 +196,10 @@ def flag(argument):
else:
return None
def unchanged(argument):
def unchanged_required(argument):
"""
Return the argument, unchanged.
Return the argument text, unchanged.
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
......@@ -193,9 +208,22 @@ def unchanged(argument):
else:
return argument # unchanged!
def unchanged(argument):
    """
    Return the argument text, unchanged.
    (Directive option conversion function.)

    No argument implies empty string ("").
    """
    if argument is None:
        return u''
    return argument  # passed through as-is
def path(argument):
"""
Return the path argument unwrapped (with newlines removed).
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found or if the path contains
internal whitespace.
......@@ -212,17 +240,44 @@ def path(argument):
def nonnegative_int(argument):
    """
    Check for a nonnegative integer argument; raise ``ValueError`` if not.
    (Directive option conversion function.)
    """
    # int() itself raises ValueError for non-integer text, which is the
    # contract callers rely on.
    number = int(argument)
    if number < 0:
        raise ValueError('negative value; must be positive or zero')
    return number
def class_option(argument):
    """
    Convert the argument into an ID-compatible string and return it.
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is not None:
        return nodes.make_id(argument)
    raise ValueError('argument required but none supplied')
def format_values(values):
    """Return `values` as a quoted, comma-separated English list, e.g.
    ``'"a", "b", or "c"'`` -- used when reporting invalid option choices."""
    quoted = ['"%s"' % value for value in values[:-1]]
    return '%s, or "%s"' % (', '.join(quoted), values[-1])
def choice(argument, values):
"""
Directive option utility function, supplied to enable options whose
argument must be a member of a finite set of possible values (must be
lower case). A custom conversion function must be written to use it. For
example::
from docutils.parsers.rst import directives
def yesno(argument):
return directives.choice(argument, ('yes', 'no'))
Raise ``ValueError`` if no argument is found or if the argument's value is
not valid (not an entry in the supplied list).
"""
try:
value = argument.lower().strip()
except AttributeError:
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -11,64 +11,80 @@ Admonition directives.
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import states
from docutils.parsers.rst import states, directives
from docutils import nodes
def admonition(node_class, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
text = '\n'.join(content)
admonition_node = node_class(text)
if text:
state.nested_parse(content, content_offset, admonition_node)
return [admonition_node]
else:
def make_admonition(node_class, name, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    """
    Shared implementation for all admonition directives.

    Builds and returns a single ``node_class`` node containing the parsed
    directive content.  If `arguments` is non-empty, its first item becomes
    the admonition title (parsed as inline text), and a "class" attribute is
    set from the ``class`` option or derived from the title.  Returns a list
    with one error system message instead when `content` is empty.
    """
    if not content:
        # Admonitions require a body; report instead of emitting an empty node.
        error = state_machine.reporter.error(
            'The "%s" admonition is empty; content required.' % (name),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    text = '\n'.join(content)
    admonition_node = node_class(text)
    if arguments:
        # Optional title: parse inline markup, then attach any parse messages.
        title_text = arguments[0]
        textnodes, messages = state.inline_text(title_text, lineno)
        admonition_node += nodes.title(title_text, '', *textnodes)
        admonition_node += messages
        if options.has_key('class'):
            class_value = options['class']
        else:
            # Default class derived from the title, e.g. "admonition-my-title".
            class_value = 'admonition-' + nodes.make_id(title_text)
        admonition_node.set_class(class_value)
    # Parse the directive body as nested reStructuredText into the node.
    state.nested_parse(content, content_offset, admonition_node)
    return [admonition_node]
def admonition(*args):
    # Generic "admonition" directive: delegates to the shared helper with
    # the generic nodes.admonition node class.  The single required
    # argument (see admonition.arguments below) supplies the title.
    return make_admonition(nodes.admonition, *args)

# Directive registration attributes consumed by the directive dispatcher.
admonition.arguments = (1, 0, 1)
admonition.options = {'class': directives.class_option}
admonition.content = 1
def attention(*args):
    """Directive function for the "attention" admonition."""
    # Removed the stale pre-refactor call to the old admonition(node_class,
    # ...) API, which shadowed this (previously unreachable) correct call.
    return make_admonition(nodes.attention, *args)

attention.content = 1
def caution(*args):
    """Directive function for the "caution" admonition."""
    # Removed the stale old-API call that made this line unreachable.
    return make_admonition(nodes.caution, *args)

caution.content = 1
def danger(*args):
    """Directive function for the "danger" admonition."""
    # Removed the stale old-API call that made this line unreachable.
    return make_admonition(nodes.danger, *args)

danger.content = 1
def error(*args):
    """Directive function for the "error" admonition."""
    # Removed the stale old-API call that made this line unreachable.
    return make_admonition(nodes.error, *args)

error.content = 1
def hint(*args):
    # "hint" admonition directive: delegate to the shared helper with the
    # nodes.hint node class.
    return make_admonition(nodes.hint, *args)

hint.content = 1
def important(*args):
    """Directive function for the "important" admonition."""
    # Removed the stale old-API call that made this line unreachable.
    return make_admonition(nodes.important, *args)

important.content = 1
def note(*args):
    """Directive function for the "note" admonition."""
    # Removed the stale old-API call that made this line unreachable.
    return make_admonition(nodes.note, *args)

note.content = 1
def tip(*args):
    """Directive function for the "tip" admonition."""
    # Removed the stale old-API call that made this line unreachable.
    return make_admonition(nodes.tip, *args)

tip.content = 1
def hint(*args):
    """Directive function for the "hint" admonition."""
    # NOTE(review): this is a residual duplicate of the `hint` definition
    # above.  It called the removed admonition(node_class, ...) API, whose
    # replacement admonition(*args) has a different contract; delegate to
    # make_admonition like the other admonition directives.
    return make_admonition(nodes.hint, *args)

hint.content = 1
def warning(*args):
    """Directive function for the "warning" admonition."""
    # Removed the stale old-API call that made this line unreachable.
    return make_admonition(nodes.warning, *args)

warning.content = 1
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -13,13 +13,16 @@ __docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import directives
def topic(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
content_offset, block_text, state, state_machine,
node_class=nodes.topic):
if not state_machine.match_titles:
error = state_machine.reporter.error(
'Topics may not be nested within topics or body elements.',
'The "%s" directive may not be used within topics, sidebars, '
'or body elements.' % name,
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
if not content:
......@@ -30,16 +33,35 @@ def topic(name, arguments, options, content, lineno,
return [warning]
title_text = arguments[0]
textnodes, messages = state.inline_text(title_text, lineno)
title = nodes.title(title_text, '', *textnodes)
titles = [nodes.title(title_text, '', *textnodes)]
if options.has_key('subtitle'):
textnodes, more_messages = state.inline_text(options['subtitle'],
lineno)
titles.append(nodes.subtitle(options['subtitle'], '', *textnodes))
messages.extend(more_messages)
text = '\n'.join(content)
topic_node = nodes.topic(text, title, *messages)
node = node_class(text, *(titles + messages))
if options.has_key('class'):
node.set_class(options['class'])
if text:
state.nested_parse(content, content_offset, topic_node)
return [topic_node]
state.nested_parse(content, content_offset, node)
return [node]
topic.arguments = (1, 0, 1)
topic.options = {'class': directives.class_option}
topic.content = 1
def sidebar(name, arguments, options, content, lineno,
            content_offset, block_text, state, state_machine):
    """
    "sidebar" directive: identical to ``topic`` except that it produces a
    ``nodes.sidebar`` node and additionally accepts a ``subtitle`` option.
    """
    return topic(name, arguments, options, content, lineno,
                 content_offset, block_text, state, state_machine,
                 node_class=nodes.sidebar)

# Registration attributes: one required title argument, optional subtitle
# and class options, content required.
sidebar.arguments = (1, 0, 1)
sidebar.options = {'subtitle': directives.unchanged_required,
                   'class': directives.class_option}
sidebar.content = 1
def line_block(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
node_class=nodes.line_block):
......@@ -50,9 +72,10 @@ def line_block(name, arguments, options, content, lineno,
return [warning]
text = '\n'.join(content)
text_nodes, messages = state.inline_text(text, lineno)
node = node_class(text, '', *text_nodes)
node = node_class(text, '', *text_nodes, **options)
return [node] + messages
line_block.options = {'class': directives.class_option}
line_block.content = 1
def parsed_literal(name, arguments, options, content, lineno,
......@@ -61,4 +84,39 @@ def parsed_literal(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
node_class=nodes.literal_block)
parsed_literal.options = {'class': directives.class_option}
parsed_literal.content = 1
def rubric(name, arguments, options, content, lineno,
           content_offset, block_text, state, state_machine):
    """
    "rubric" directive: build a ``nodes.rubric`` element from the single
    argument, parsed as inline text, and return it together with any
    inline-parse system messages.
    """
    source_text = arguments[0]
    textnodes, messages = state.inline_text(source_text, lineno)
    rubric_node = nodes.rubric(source_text, '', *textnodes, **options)
    return [rubric_node] + messages

rubric.arguments = (1, 0, 1)
rubric.options = {'class': directives.class_option}
def epigraph(name, arguments, options, content, lineno,
             content_offset, block_text, state, state_machine):
    """
    "epigraph" directive: a block quote whose class is set to "epigraph".
    """
    quote_node, messages = state.block_quote(content, content_offset)
    quote_node.set_class('epigraph')
    return [quote_node] + messages

epigraph.content = 1
def highlights(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    """
    "highlights" directive: a block quote whose class is set to
    "highlights".
    """
    quote_node, messages = state.block_quote(content, content_offset)
    quote_node.set_class('highlights')
    return [quote_node] + messages

highlights.content = 1
def pull_quote(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    """
    "pull-quote" directive: a block quote whose class is set to
    "pull-quote".
    """
    quote_node, messages = state.block_quote(content, content_offset)
    quote_node.set_class('pull-quote')
    return [quote_node] + messages

pull_quote.content = 1
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -15,6 +15,10 @@ import sys
from docutils import nodes, utils
from docutils.parsers.rst import directives
try:
import Image # PIL
except ImportError:
Image = None
# Presumably the accepted values for the image "align" option -- confirm
# against the (not visible here) align() conversion function.
align_values = ('top', 'middle', 'bottom', 'left', 'center', 'right')
......@@ -38,15 +42,33 @@ image.options = {'alt': directives.unchanged,
'height': directives.nonnegative_int,
'width': directives.nonnegative_int,
'scale': directives.nonnegative_int,
'align': align}
'align': align,
'class': directives.class_option}
def figure(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
figwidth = options.setdefault('figwidth')
figclass = options.setdefault('figclass')
del options['figwidth']
del options['figclass']
(image_node,) = image(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine)
if isinstance(image_node, nodes.system_message):
return [image_node]
figure_node = nodes.figure('', image_node)
if figwidth == 'image':
if Image:
# PIL doesn't like Unicode paths:
try:
i = Image.open(str(image_node['uri']))
except (IOError, UnicodeError):
pass
else:
figure_node['width'] = i.size[0]
elif figwidth is not None:
figure_node['width'] = figwidth
if figclass:
figure_node.set_class(figclass)
if content:
node = nodes.Element() # anonymous container for parsing
state.nested_parse(content, content_offset, node)
......@@ -65,6 +87,14 @@ def figure(name, arguments, options, content, lineno,
figure_node += nodes.legend('', *node[1:])
return [figure_node]
def figwidth_value(argument):
    """
    Convert a ``figwidth`` option value: the literal string "image"
    (case-insensitive) or a nonnegative integer.
    """
    if argument.lower() != 'image':
        return directives.nonnegative_int(argument)
    return 'image'
figure.arguments = (1, 0, 1)
# "figure" accepts its own options plus everything "image" accepts.
# Removed the stale dead-store `figure.options = image.options` that was
# immediately overwritten by the assignment below (merge residue); it also
# risked mutating image.options via the update() call.
figure.options = {'figwidth': figwidth_value,
                  'figclass': directives.class_option}
figure.options.update(image.options)
figure.content = 1
# Authors: David Goodger, Dethe Elza
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""Miscellaneous directives."""
......@@ -10,9 +10,11 @@ __docformat__ = 'reStructuredText'
import sys
import os.path
import re
from urllib2 import urlopen, URLError
from docutils import io, nodes, statemachine, utils
from docutils.parsers.rst import directives, states
from docutils.transforms import misc
def include(name, arguments, options, content, lineno,
......@@ -31,10 +33,12 @@ def include(name, arguments, options, content, lineno,
path = utils.relative_path(None, path)
try:
include_file = io.FileInput(
source_path=path, encoding=state.document.settings.input_encoding)
source_path=path, encoding=state.document.settings.input_encoding,
handle_io_errors=None)
except IOError, error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.' % (name, error),
'Problems with "%s" directive path:\n%s: %s.'
% (name, error.__class__.__name__, error),
nodes.literal_block(block_text, block_text), line=lineno)
return [severe]
include_text = include_file.read()
......@@ -151,6 +155,65 @@ def replace(name, arguments, options, content, lineno,
replace.content = 1
def unicode_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
    r"""
    Convert Unicode character codes (numbers) to characters.  Codes may be
    decimal numbers, hexadecimal numbers (prefixed by ``0x``, ``x``, ``\x``,
    ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style numeric character
    entities (e.g. ``&#x262E;``).  Text following ".." is a comment and is
    ignored.  Spaces are ignored, and any other text remains as-is.
    """
    # Only valid inside a substitution definition (".. |name| unicode:: ...").
    if not isinstance(state, states.SubstitutionDef):
        error = state_machine.reporter.error(
            'Invalid context: the "%s" directive can only be used within a '
            'substitution definition.' % (name),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    # Strip the ".. comment" tail, then split the remainder on whitespace.
    codes = arguments[0].split('.. ')[0].split()
    element = nodes.Element()  # temporary container; only .children returned
    for code in codes:
        try:
            if code.isdigit():
                # Plain decimal character code.
                element += nodes.Text(unichr(int(code)))
            else:
                match = unicode_pattern.match(code)
                if match:
                    # Hex form: exactly one of the two pattern groups
                    # (prefixed number or XML entity) matched.
                    value = match.group(1) or match.group(2)
                    element += nodes.Text(unichr(int(value, 16)))
                else:
                    # Unrecognized token: pass the text through unchanged.
                    element += nodes.Text(code)
        except ValueError, err:
            # int()/unichr() rejected the code (e.g. out of range).
            error = state_machine.reporter.error(
                'Invalid character code: %s\n%s' % (code, err),
                nodes.literal_block(block_text, block_text), line=lineno)
            return [error]
    return element.children

unicode_directive.arguments = (1, 0, 1)

# Matches hex codes with a prefix (0x, x, \x, U+, u, \u) in group 1, or
# XML-style numeric character entities (&#x...;) in group 2.  The \x00
# escapes stand for literal backslash-prefixed forms after rST escape
# processing -- TODO confirm.
unicode_pattern = re.compile(
    r'(?:0x|x|\x00x|U\+?|\x00u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
def class_directive(name, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    """
    "class" directive: record a pending ``misc.ClassAttribute`` transform
    carrying the ID-normalized argument as a "class" value.

    Returns a one-element list with either the pending node or, when the
    argument normalizes to an empty/invalid value, an error system message.
    """
    class_value = nodes.make_id(arguments[0])
    if class_value:
        # Defer the actual attribute assignment to the ClassAttribute
        # transform; the document is notified so the transform runs later.
        pending = nodes.pending(misc.ClassAttribute,
                                {'class': class_value, 'directive': name},
                                block_text)
        state_machine.document.note_pending(pending)
        return [pending]
    else:
        error = state_machine.reporter.error(
            'Invalid class attribute value for "%s" directive: %s'
            % (name, arguments[0]),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]

class_directive.arguments = (1, 0, 0)
class_directive.content = 1
def directive_test_function(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if content:
......@@ -166,5 +229,5 @@ def directive_test_function(name, arguments, options, content, lineno,
return [info]
directive_test_function.arguments = (0, 1, 1)
directive_test_function.options = {'option': directives.unchanged}
directive_test_function.options = {'option': directives.unchanged_required}
directive_test_function.content = 1
# Author: David Goodger, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -42,7 +42,8 @@ def contents(name, arguments, options, content, lineno,
contents.arguments = (0, 1, 1)
contents.options = {'depth': directives.nonnegative_int,
'local': directives.flag,
'backlinks': backlinks}
'backlinks': backlinks,
'class': directives.class_option}
def sectnum(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
......
# Author: David Goodger, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:09 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:13 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# Internationalization details are documented in
# <http://docutils.sf.net/spec/howto/i18n.html>.
"""
This package contains modules for language-dependent features of
reStructuredText.
......
# Author: Jannie Hofmeyr
# Contact: jhsh@sun.ac.za
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Afrikaans-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Maps Afrikaans directive names (keys) to the canonical English directive
# names (values) registered in directives/__init__.py.
directives = {
    'aandag': 'attention',
    'versigtig': 'caution',
    'gevaar': 'danger',
    'fout': 'error',
    'wenk': 'hint',
    'belangrik': 'important',
    'nota': 'note',
    'tip': 'tip', # hint and tip both have the same translation: wenk
    'waarskuwing': 'warning',
    'vermaning': 'admonition',
    'kantstreep': 'sidebar',
    'onderwerp': 'topic',
    'lynblok': 'line-block',
    'parsed-literal (translation required)': 'parsed-literal',
    'rubriek': 'rubric',
    'epigraaf': 'epigraph',
    'hoogtepunte': 'highlights',
    'pull-quote (translation required)': 'pull-quote',
    #'vrae': 'questions',
    #'qa': 'questions',
    #'faq': 'questions',
    'meta': 'meta',
    #'beeldkaart': 'imagemap',
    'beeld': 'image',
    'figuur': 'figure',
    'insluiting': 'include',
    'rou': 'raw',
    'vervang': 'replace',
    'unicode': 'unicode', # should this be translated? unikode
    'klas': 'class',
    'inhoud': 'contents',
    'sectnum': 'sectnum',
    'section-numbering': 'sectnum',
    #'voetnote': 'footnotes',
    #'aanhalings': 'citations',
    'teikennotas': 'target-notes',
    'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Afrikaans name to registered (in directives/__init__.py) directive name
mapping."""
# Maps Afrikaans interpreted-text role names (keys) to canonical role
# names (values).
roles = {
    'afkorting': 'abbreviation',
    'ab': 'abbreviation',
    'akroniem': 'acronym',
    'ac': 'acronym',
    'indeks': 'index',
    'i': 'index',
    'voetskrif': 'subscript',
    'sub': 'subscript',
    'boskrif': 'superscript',
    'sup': 'superscript',
    'titelverwysing': 'title-reference',
    'titel': 'title-reference',
    't': 'title-reference',
    'pep-verwysing': 'pep-reference',
    'pep': 'pep-reference',
    'rfc-verwysing': 'rfc-reference',
    'rfc': 'rfc-reference',
    'nadruk': 'emphasis',
    'sterk': 'strong',
    'literal (translation required)': 'literal',
    'benoemde verwysing': 'named-reference',
    'anonieme verwysing': 'anonymous-reference',
    'voetnootverwysing': 'footnote-reference',
    'aanhalingverwysing': 'citation-reference',
    'vervangingsverwysing': 'substitution-reference',
    'teiken': 'target',
    'uri-verwysing': 'uri-reference',
    'uri': 'uri-reference',
    'url': 'uri-reference',}
"""Mapping of Afrikaans role names to canonical role names for interpreted text.
"""
# -*- coding: iso-8859-1 -*-
# Author: Engelbert Gruber
# Contact: grubert@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:13 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German-language mappings for language-dependent features of
reStructuredText.
......@@ -22,9 +28,15 @@ directives = {
'notiz': 'note',
'tip': 'tip',
'warnung': 'warning',
'topic': 'topic', # berbegriff
'ermahnung': 'admonition',
'kasten': 'sidebar', # seitenkasten ?
'thema': 'topic',
'line-block': 'line-block',
'parsed-literal': 'parsed-literal',
'rubrik': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote', # kasten too ?
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
......@@ -35,7 +47,9 @@ directives = {
'raw': 'raw', # unbearbeitet
'include': 'include', # einfügen, "füge ein" would be more like a command.
# einfügung would be the noun.
'replace': 'replace', # ersetzen, ersetze
'ersetzung': 'replace', # ersetzen, ersetze
'unicode': 'unicode',
'klasse': 'class', # offer class too ?
'inhalt': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
......@@ -43,5 +57,27 @@ directives = {
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
"""German name to registered (in directives/__init__.py) directive name
mapping."""
# Maps German interpreted-text role names (keys) to canonical role names
# (values).  All entries are still untranslated placeholders.
roles = {
    'abbreviation (translation required)': 'abbreviation',
    'acronym (translation required)': 'acronym',
    'index (translation required)': 'index',
    'subscript (translation required)': 'subscript',
    'superscript (translation required)': 'superscript',
    'title-reference (translation required)': 'title-reference',
    'pep-reference (translation required)': 'pep-reference',
    'rfc-reference (translation required)': 'rfc-reference',
    'emphasis (translation required)': 'emphasis',
    'strong (translation required)': 'strong',
    'literal (translation required)': 'literal',
    'named-reference (translation required)': 'named-reference',
    'anonymous-reference (translation required)': 'anonymous-reference',
    'footnote-reference (translation required)': 'footnote-reference',
    'citation-reference (translation required)': 'citation-reference',
    'substitution-reference (translation required)': 'substitution-reference',
    'target (translation required)': 'target',
    'uri-reference (translation required)': 'uri-reference',}
"""Mapping of German role names to canonical role names for interpreted text.
"""
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:13 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
English-language mappings for language-dependent features of
reStructuredText.
......@@ -13,6 +18,7 @@ __docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention': 'attention',
'caution': 'caution',
'danger': 'danger',
......@@ -22,9 +28,15 @@ directives = {
'note': 'note',
'tip': 'tip',
'warning': 'warning',
'admonition': 'admonition',
'sidebar': 'sidebar',
'topic': 'topic',
'line-block': 'line-block',
'parsed-literal': 'parsed-literal',
'rubric': 'rubric',
'epigraph': 'epigraph',
'highlights': 'highlights',
'pull-quote': 'pull-quote',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
......@@ -35,23 +47,30 @@ directives = {
'include': 'include',
'raw': 'raw',
'replace': 'replace',
'unicode': 'unicode',
'class': 'class',
'contents': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
'target-notes': 'target-notes',
#'footnotes': 'footnotes',
#'citations': 'citations',
'target-notes': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'abbreviation': 'abbreviation',
'ab': 'abbreviation',
'acronym': 'acronym',
'ac': 'acronym',
'index': 'index',
'i': 'index',
'subscript': 'subscript',
'sub': 'subscript',
'superscript': 'superscript',
'sup': 'superscript',
'title-reference': 'title-reference',
'title': 'title-reference',
't': 'title-reference',
......@@ -70,7 +89,6 @@ roles = {
'target': 'target',
'uri-reference': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
}
'url': 'uri-reference',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
# Author: Marcelo Huerta San Martn
# Contact: mghsm@uol.com.ar
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Spanish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Maps Spanish directive names (keys) to the canonical English directive
# names (values) registered in directives/__init__.py.  Accented forms are
# given both with and without the accent (as \uXXXX escapes).
directives = {
    u'atenci\u00f3n': 'attention',
    u'atencion': 'attention',
    u'precauci\u00f3n': 'caution',
    u'precaucion': 'caution',
    u'peligro': 'danger',
    u'error': 'error',
    u'sugerencia': 'hint',
    u'importante': 'important',
    u'nota': 'note',
    u'consejo': 'tip',
    u'advertencia': 'warning',
    u'exhortacion': 'admonition',
    u'exhortaci\u00f3n': 'admonition',
    u'nota-al-margen': 'sidebar',
    u'tema': 'topic',
    u'bloque-de-lineas': 'line-block',
    u'bloque-de-l\u00edneas': 'line-block',
    u'literal-evaluado': 'parsed-literal',
    u'firma': 'rubric',
    u'ep\u00edgrafe': 'epigraph',
    u'epigrafe': 'epigraph',
    u'destacado': 'highlights',
    u'cita-destacada': 'pull-quote',
    #'questions': 'questions',
    #'qa': 'questions',
    #'faq': 'questions',
    u'meta': 'meta',
    #'imagemap': 'imagemap',
    u'imagen': 'image',
    u'figura': 'figure',
    u'incluir': 'include',
    u'raw': 'raw',
    u'reemplazar': 'replace',
    u'unicode': 'unicode',
    u'clase': 'class',
    u'contenido': 'contents',
    u'numseccion': 'sectnum',
    u'numsecci\u00f3n': 'sectnum',
    u'numeracion-seccion': 'sectnum',
    u'numeraci\u00f3n-secci\u00f3n': 'sectnum',
    u'notas-destino': 'target-notes',
    #'footnotes': 'footnotes',
    #'citations': 'citations',
    u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Spanish name to registered (in directives/__init__.py) directive name
mapping."""
# Maps Spanish interpreted-text role names (keys) to canonical role names
# (values).  Removed a literal duplicate of the u'acronimo' entry (the
# same key/value pair appeared twice; a dict keeps only one anyway).
roles = {
    u'abreviatura': 'abbreviation',
    u'ab': 'abbreviation',
    u'acronimo': 'acronym',
    u'ac': 'acronym',
    u'indice': 'index',
    u'i': 'index',
    u'subscript (translation required)': 'subscript',
    u'superscript (translation required)': 'superscript',
    u'referencia-titulo': 'title-reference',
    u'titulo': 'title-reference',
    u't': 'title-reference',
    u'referencia-pep': 'pep-reference',
    u'pep': 'pep-reference',
    u'referencia-rfc': 'rfc-reference',
    u'rfc': 'rfc-reference',
    u'enfasis': 'emphasis',
    u'\u00e9nfasis': 'emphasis',
    u'destacado': 'strong',
    u'literal': 'literal',
    u'referencia-con-nombre': 'named-reference',
    u'referencia-anonima': 'anonymous-reference',
    u'referencia-an\u00f3nima': 'anonymous-reference',
    u'referencia-nota-al-pie': 'footnote-reference',
    u'referencia-cita': 'citation-reference',
    u'referencia-sustitucion': 'substitution-reference',
    u'referencia-sustituci\u00f3n': 'substitution-reference',
    u'destino': 'target',
    u'referencia-uri': 'uri-reference',
    u'uri': 'uri-reference',
    u'url': 'uri-reference',
    }
"""Mapping of Spanish role names to canonical role names for interpreted text.
"""
# Author: your name here!
# Authors: David Goodger; William Dode
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:13 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
French-language mappings for language-dependent features of
reStructuredText.
......@@ -13,34 +18,72 @@ __docformat__ = 'reStructuredText'
# Maps French directive names (keys) to the canonical English directive
# names (values) registered in directives/__init__.py.
# NOTE(review): the "(translation required)" placeholder entries below
# duplicate the translated entries that follow them; they appear to be
# pre-translation residue -- confirm before removing.
directives = {
    'attention (translation required)': 'attention',
    'caution (translation required)': 'caution',
    'danger (translation required)': 'danger',
    'error (translation required)': 'error',
    'hint (translation required)': 'hint',
    'important (translation required)': 'important',
    'note (translation required)': 'note',
    'tip (translation required)': 'tip',
    'warning (translation required)': 'warning',
    'topic (translation required)': 'topic',
    'line-block (translation required)': 'line-block',
    'parsed-literal (translation required)': 'parsed-literal',
    #'questions (translation required)': 'questions',
    #'qa (translation required)': 'questions',
    #'faq (translation required)': 'questions',
    'meta (translation required)': 'meta',
    #'imagemap (translation required)': 'imagemap',
    'image (translation required)': 'image',
    'figure (translation required)': 'figure',
    'include (translation required)': 'include',
    'raw (translation required)': 'raw',
    'replace (translation required)': 'replace',
    'contents (translation required)': 'contents',
    'sectnum (translation required)': 'sectnum',
    'section-numbering (translation required)': 'sectnum',
    'target-notes (translation required)': 'target-notes',
    #'footnotes (translation required)': 'footnotes',
    #'citations (translation required)': 'citations',
    u'attention': 'attention',
    u'pr\u00E9caution': 'caution',
    u'danger': 'danger',
    u'erreur': 'error',
    u'conseil': 'hint',
    u'important': 'important',
    u'note': 'note',
    u'astuce': 'tip',
    u'avertissement': 'warning',
    u'admonition': 'admonition',
    u'encadr\u00E9': 'sidebar',
    u'sujet': 'topic',
    u'bloc-textuel': 'line-block',
    u'bloc-interpr\u00E9t\u00E9': 'parsed-literal',
    u'code-interpr\u00E9t\u00E9': 'parsed-literal',
    u'intertitre': 'rubric',
    u'exergue': 'epigraph',
    u'\u00E9pigraphe': 'epigraph',
    u'chapeau': 'highlights',
    u'accroche': 'pull-quote',
    #u'questions': 'questions',
    #u'qr': 'questions',
    #u'faq': 'questions',
    u'm\u00E9ta': 'meta',
    #u'imagemap (translation required)': 'imagemap',
    u'image': 'image',
    u'figure': 'figure',
    u'inclure': 'include',
    u'brut': 'raw',
    u'remplacer': 'replace',
    u'remplace': 'replace',
    u'unicode': 'unicode',
    u'classe': 'class',
    u'sommaire': 'contents',
    u'table-des-mati\u00E8res': 'contents',
    u'sectnum': 'sectnum',
    u'section-num\u00E9rot\u00E9e': 'sectnum',
    u'liens': 'target-notes',
    #u'footnotes (translation required)': 'footnotes',
    #u'citations (translation required)': 'citations',
    }
"""French name to registered (in directives/__init__.py) directive name
mapping."""
# Each French role name (synonyms included) maps to exactly one canonical
# docutils role name.
roles = {
    # inline text styles
    u'emphase': 'emphasis',
    u'fort': 'strong',
    u'litt\u00E9ral': 'literal',
    # abbreviations and acronyms
    u'abr\u00E9viation': 'abbreviation',
    u'acronyme': 'acronym',
    u'sigle': 'acronym',
    # indexing, super- and subscripts
    u'index': 'index',
    u'exposant': 'superscript',
    u'exp': 'superscript',
    u'indice': 'subscript',
    u'ind': 'subscript',
    # titles and document references
    u'titre-r\u00E9f\u00E9rence': 'title-reference',
    u'titre': 'title-reference',
    u'pep-r\u00E9f\u00E9rence': 'pep-reference',
    u'rfc-r\u00E9f\u00E9rence': 'rfc-reference',
    # hyperlink-style references and targets
    u'nomm\u00E9e-r\u00E9f\u00E9rence': 'named-reference',
    u'anonyme-r\u00E9f\u00E9rence': 'anonymous-reference',
    u'note-r\u00E9f\u00E9rence': 'footnote-reference',
    u'citation-r\u00E9f\u00E9rence': 'citation-reference',
    u'substitution-r\u00E9f\u00E9rence': 'substitution-reference',
    u'uri-r\u00E9f\u00E9rence': 'uri-reference',
    u'lien': 'target',
    }
"""Mapping of French role names to canonical role names for interpreted text.
"""
# Author: Nicola Larosa
# Contact: docutils@tekNico.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Italian-language mappings for language-dependent features of
reStructuredText.
......@@ -22,9 +27,15 @@ directives = {
'nota': 'note',
'consiglio': 'tip',
'avvertenza': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'argomento': 'topic',
'blocco di linee': 'line-block',
'parsed-literal': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
......@@ -35,6 +46,8 @@ directives = {
'includi': 'include',
'grezzo': 'raw',
'sostituisci': 'replace',
'unicode': 'unicode',
'class (translation required)': 'class',
'indice': 'contents',
'seznum': 'sectnum',
'section-numbering': 'sectnum',
......@@ -42,5 +55,27 @@ directives = {
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
"""Italian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
    # No Italian translations exist yet: every key is the English canonical
    # role name flagged with an explicit "(translation required)" marker.
    'abbreviation (translation required)': 'abbreviation',
    'acronym (translation required)': 'acronym',
    'anonymous-reference (translation required)': 'anonymous-reference',
    'citation-reference (translation required)': 'citation-reference',
    'emphasis (translation required)': 'emphasis',
    'footnote-reference (translation required)': 'footnote-reference',
    'index (translation required)': 'index',
    'literal (translation required)': 'literal',
    'named-reference (translation required)': 'named-reference',
    'pep-reference (translation required)': 'pep-reference',
    'rfc-reference (translation required)': 'rfc-reference',
    'strong (translation required)': 'strong',
    'subscript (translation required)': 'subscript',
    'substitution-reference (translation required)': 'substitution-reference',
    'superscript (translation required)': 'superscript',
    'target (translation required)': 'target',
    'title-reference (translation required)': 'title-reference',
    'uri-reference (translation required)': 'uri-reference',
    }
"""Mapping of Italian role names to canonical role names for interpreted text.
"""
# Author: Roman Suzi
# Contact: rnd@onego.ru
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
    # Keys are Russian directive names (Cyrillic, written as \u escapes);
    # values are the canonical English directive names.  Some long keys are
    # split across lines via implicit adjacent-string concatenation.
    u'\u0431\u043b\u043e\u043a-\u0441\u0442\u0440\u043e\u043a': u'line-block',
    u'meta': u'meta',
    u'\u043e\u0431\u0440\u0430\u0431\u043e\u0442\u0430\u043d\u043d\u044b\u0439-\u043b\u0438\u0442\u0435\u0440\u0430\u043b':
    u'parsed-literal',
    u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u043d\u0430\u044f-\u0446\u0438\u0442\u0430\u0442\u0430':
    u'pull-quote',
    u'\u0441\u044b\u0440\u043e\u0439': u'raw',
    u'\u0437\u0430\u043c\u0435\u043d\u0430': u'replace',
    u'\u0442\u0435\u0441\u0442\u043e\u0432\u0430\u044f-\u0434\u0438\u0440\u0435\u043a\u0442\u0438\u0432\u0430-restructuredtext':
    u'restructuredtext-test-directive',
    u'\u0446\u0435\u043b\u0435\u0432\u044b\u0435-\u0441\u043d\u043e\u0441\u043a\u0438':
    u'target-notes',
    u'unicode': u'unicode',
    u'\u0431\u043e\u043a\u043e\u0432\u0430\u044f-\u043f\u043e\u043b\u043e\u0441\u0430':
    u'sidebar',
    u'\u0432\u0430\u0436\u043d\u043e': u'important',
    u'\u0432\u043a\u043b\u044e\u0447\u0430\u0442\u044c': u'include',
    u'\u0432\u043d\u0438\u043c\u0430\u043d\u0438\u0435': u'attention',
    u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': u'highlights',
    u'\u0437\u0430\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'admonition',
    u'\u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435':
    u'image',
    u'\u043a\u043b\u0430\u0441\u0441': u'class',
    u'\u043d\u043e\u043c\u0435\u0440-\u0440\u0430\u0437\u0434\u0435\u043b\u0430':
    u'sectnum',
    # two adjacent u'' literals below form one key (implicit concatenation):
    u'\u043d\u0443\u043c\u0435\u0440\u0430\u0446\u0438\u044f-\u0440\u0430\u0437'
    u'\u0434\u0435\u043b\u043e\u0432': u'sectnum',
    u'\u043e\u043f\u0430\u0441\u043d\u043e': u'danger',
    u'\u043e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e': u'caution',
    u'\u043e\u0448\u0438\u0431\u043a\u0430': u'error',
    u'\u043f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430': u'tip',
    u'\u043f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436\u0434\u0435\u043d'
    u'\u0438\u0435': u'warning',
    u'\u043f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'note',
    u'\u0440\u0438\u0441\u0443\u043d\u043e\u043a': u'figure',
    u'\u0440\u0443\u0431\u0440\u0438\u043a\u0430': u'rubric',
    u'\u0441\u043e\u0432\u0435\u0442': u'hint',
    u'\u0441\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435': u'contents',
    u'\u0442\u0435\u043c\u0430': u'topic',
    u'\u044d\u043f\u0438\u0433\u0440\u0430\u0444': u'epigraph'}
"""Russian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
    # Keys are Russian role names (Cyrillic, written as \u escapes); values
    # are the canonical English role names for interpreted text.
    u'\u0430\u043a\u0440\u043e\u043d\u0438\u043c': 'acronym',
    u'\u0430\u043d\u043e\u043d\u0438\u043c\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
    'anonymous-reference',
    u'\u0431\u0443\u043a\u0432\u0430\u043b\u044c\u043d\u043e': 'literal',
    u'\u0432\u0435\u0440\u0445\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
    'superscript',
    u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': 'emphasis',
    u'\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
    'named-reference',
    u'\u0438\u043d\u0434\u0435\u043a\u0441': 'index',
    u'\u043d\u0438\u0436\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
    'subscript',
    u'\u0441\u0438\u043b\u044c\u043d\u043e\u0435-\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435':
    'strong',
    u'\u0441\u043e\u043a\u0440\u0430\u0449\u0435\u043d\u0438\u0435':
    'abbreviation',
    u'\u0441\u0441\u044b\u043b\u043a\u0430-\u0437\u0430\u043c\u0435\u043d\u0430':
    'substitution-reference',
    u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-pep': 'pep-reference',
    u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-rfc': 'rfc-reference',
    u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-uri': 'uri-reference',
    u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0437\u0430\u0433\u043b\u0430\u0432\u0438\u0435':
    'title-reference',
    u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0441\u043d\u043e\u0441\u043a\u0443':
    'footnote-reference',
    u'\u0446\u0438\u0442\u0430\u0442\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
    'citation-reference',
    u'\u0446\u0435\u043b\u044c': 'target'}
"""Mapping of Russian role names to canonical role names for interpreted text.
"""
# Author: Miroslav Vasko
# Contact: zemiak@zoznam.sk
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Slovak-language mappings for language-dependent features of
reStructuredText.
......@@ -22,9 +27,15 @@ directives = {
u'pozn\xe1mka': 'note',
u'tip': 'tip',
u'varovanie': 'warning',
u'admonition (translation required)': 'admonition',
u'sidebar (translation required)': 'sidebar',
u't\xe9ma': 'topic',
u'blok-riadkov': 'line-block',
u'parsed-literal': 'parsed-literal',
u'rubric (translation required)': 'rubric',
u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
#u'questions': 'questions',
#u'qa': 'questions',
#u'faq': 'questions',
......@@ -35,6 +46,8 @@ directives = {
u'vlo\x9ei\x9d': 'include',
u'raw': 'raw',
u'nahradi\x9d': 'replace',
u'unicode': 'unicode',
u'class (translation required)': 'class',
u'obsah': 'contents',
u'\xe8as\x9d': 'sectnum',
u'\xe8as\x9d-\xe8\xedslovanie': 'sectnum',
......@@ -44,3 +57,25 @@ directives = {
}
"""Slovak name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
    # No Slovak translations exist yet: every key is the English canonical
    # role name flagged with an explicit "(translation required)" marker.
    u'abbreviation (translation required)': 'abbreviation',
    u'acronym (translation required)': 'acronym',
    u'anonymous-reference (translation required)': 'anonymous-reference',
    u'citation-reference (translation required)': 'citation-reference',
    u'emphasis (translation required)': 'emphasis',
    u'footnote-reference (translation required)': 'footnote-reference',
    u'index (translation required)': 'index',
    u'literal (translation required)': 'literal',
    u'named-reference (translation required)': 'named-reference',
    u'pep-reference (translation required)': 'pep-reference',
    u'rfc-reference (translation required)': 'rfc-reference',
    u'strong (translation required)': 'strong',
    u'subscript (translation required)': 'subscript',
    u'substitution-reference (translation required)': 'substitution-reference',
    u'superscript (translation required)': 'superscript',
    u'target (translation required)': 'target',
    u'title-reference (translation required)': 'title-reference',
    u'uri-reference (translation required)': 'uri-reference',
    }
"""Mapping of Slovak role names to canonical role names for interpreted text.
"""
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of reStructuredText.
"""
......@@ -21,9 +26,15 @@ directives = {
u'notera': 'note',
u'tips': 'tip',
u'varning': 'warning',
u'admonition (translation required)': 'admonition',
u'sidebar (translation required)': 'sidebar',
u'\u00e4mne': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
u'line-block (translation required)': 'line-block',
u'parsed-literal (translation required)': 'parsed-literal',
u'mellanrubrik': 'rubric',
u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
# u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
# u'fr\u00e5gor-och-svar': 'questions',
......@@ -32,15 +43,38 @@ directives = {
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild': 'image',
u'figur': 'figure',
'include (translation required)': 'include',
u'inkludera': 'include',
u'r\u00e5': 'raw', # FIXME: Translation might be too literal.
'replace (translation required)': 'replace',
u'ers\u00e4tt': 'replace',
u'unicode': 'unicode',
u'class (translation required)': 'class',
u'inneh\u00e5ll': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
'target-notes (translation required)': 'target-notes',
u'sektionsnumrering': 'sectnum',
u'target-notes (translation required)': 'target-notes',
# u'fotnoter': 'footnotes',
# u'citeringar': 'citations',
}
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
    # No Swedish translations exist yet: every key is the English canonical
    # role name flagged with an explicit "(translation required)" marker.
    u'abbreviation (translation required)': 'abbreviation',
    u'acronym (translation required)': 'acronym',
    u'anonymous-reference (translation required)': 'anonymous-reference',
    u'citation-reference (translation required)': 'citation-reference',
    u'emphasis (translation required)': 'emphasis',
    u'footnote-reference (translation required)': 'footnote-reference',
    u'index (translation required)': 'index',
    u'literal (translation required)': 'literal',
    u'named-reference (translation required)': 'named-reference',
    u'pep-reference (translation required)': 'pep-reference',
    u'rfc-reference (translation required)': 'rfc-reference',
    u'strong (translation required)': 'strong',
    u'subscript (translation required)': 'subscript',
    u'substitution-reference (translation required)': 'substitution-reference',
    u'superscript (translation required)': 'superscript',
    u'target (translation required)': 'target',
    u'title-reference (translation required)': 'title-reference',
    u'uri-reference (translation required)': 'uri-reference',
    }
"""Mapping of Swedish role names to canonical role names for interpreted text.
"""
"""Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import re
# Exception hierarchy for Roman numeral conversion errors.

class RomanError(Exception):
    """Base class for all errors raised by this module."""
    pass

class OutOfRangeError(RomanError):
    """Number outside the representable range (1..4999)."""
    pass

class NotIntegerError(RomanError):
    """A number with a fractional part cannot be converted."""
    pass

class InvalidRomanNumeralError(RomanError):
    """Input string is not a well-formed Roman numeral."""
    pass
# Ordered (numeral, value) pairs, largest value first.  The subtractive
# forms (CM, CD, XC, XL, IX, IV) precede the plain letters so greedy
# matching consumes them before the single-letter numerals.
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))

def toRoman(n):
    """Convert an integer to a Roman numeral string.

    :param n: an integral number in 1..4999.
    :raises OutOfRangeError: if `n` is not in 1..4999.
    :raises NotIntegerError: if `n` has a fractional part.
    """
    # Range check first, so 0 and negatives get the clearer message.
    if not (0 < n < 5000):
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")
    result = ""
    for numeral, value in romanNumeralMap:
        # Greedy subtraction: emit each numeral as many times as it fits.
        while n >= value:
            result += numeral
            n -= value
    return result

# Pattern accepting every well-formed Roman numeral up to MMMMCMXCIX
# (4999).  It also matches the empty string, so blank input must be
# rejected separately (see fromRoman).
romanNumeralPattern = re.compile('''
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    $                   # end of string
    ''', re.VERBOSE)

def fromRoman(s):
    """Convert a Roman numeral string to an integer.

    :param s: an uppercase Roman numeral, e.g. ``'MCMXCIX'``.
    :raises InvalidRomanNumeralError: if `s` is blank or malformed.
    """
    if not s:
        # The regex matches the empty string, so reject blanks explicitly.
        raise InvalidRomanNumeralError('Input can not be blank')
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)
    result = 0
    index = 0
    for numeral, value in romanNumeralMap:
        # Consume repeated occurrences of each numeral, largest first.
        while s[index:index+len(numeral)] == numeral:
            result += value
            index += len(numeral)
    return result
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:40 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -107,11 +107,12 @@ __docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import TupleType
from docutils import nodes, statemachine, utils, roman, urischemes
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.utils import normalize_name
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.parsers.rst import directives, languages, tableparser
from docutils.parsers.rst.languages import en as _fallback_language_module
......@@ -159,6 +160,7 @@ class RSTStateMachine(StateMachineWS):
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=0,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
......@@ -271,8 +273,10 @@ class RSTState(StateWS):
node=node, match_titles=match_titles)
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
......@@ -340,6 +344,8 @@ class RSTState(StateWS):
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = 1
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
......@@ -471,13 +477,15 @@ class Inliner:
_interpreted_roles = {
# Values of ``None`` mean "not implemented yet":
'title-reference': 'title_reference_role',
'abbreviation': None,
'acronym': None,
'title-reference': 'generic_interpreted_role',
'abbreviation': 'generic_interpreted_role',
'acronym': 'generic_interpreted_role',
'index': None,
'emphasis': None,
'strong': None,
'literal': None,
'subscript': 'generic_interpreted_role',
'superscript': 'generic_interpreted_role',
'emphasis': 'generic_interpreted_role',
'strong': 'generic_interpreted_role',
'literal': 'generic_interpreted_role',
'named-reference': None,
'anonymous-reference': None,
'uri-reference': None,
......@@ -487,7 +495,7 @@ class Inliner:
'citation-reference': None,
'substitution-reference': None,
'target': None,
}
'restructuredtext-unimplemented-role': None}
"""Mapping of canonical interpreted text role name to method name.
Initializes a name to bound-method mapping in `__init__`."""
......@@ -495,6 +503,18 @@ class Inliner:
"""The role to use when no explicit role is given.
Override in subclasses."""
generic_roles = {'abbreviation': nodes.abbreviation,
'acronym': nodes.acronym,
'emphasis': nodes.emphasis,
'literal': nodes.literal,
'strong': nodes.strong,
'subscript': nodes.subscript,
'superscript': nodes.superscript,
'title-reference': nodes.title_reference,}
"""Mapping of canonical interpreted text role name to node class.
Used by the `generic_interpreted_role` method for simple, straightforward
roles (simple wrapping; no extra processing)."""
def __init__(self, roles=None):
"""
`roles` is a mapping of canonical role name to role function or bound
......@@ -872,9 +892,11 @@ class Inliner:
return uri
def interpreted(self, before, after, rawsource, text, role, lineno):
role_function, messages = self.get_role_function(role, lineno)
role_function, canonical, messages = self.get_role_function(role,
lineno)
if role_function:
nodelist, messages2 = role_function(role, rawsource, text, lineno)
nodelist, messages2 = role_function(canonical, rawsource, text,
lineno)
messages.extend(messages2)
return before, nodelist, after, messages
else:
......@@ -885,34 +907,34 @@ class Inliner:
msg_text = []
if role:
name = role.lower()
canonical = None
try:
canonical = self.language.roles[name]
except AttributeError, error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (self.language, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (role, self.language.__name__))
if not canonical:
try:
canonical = _fallback_language_module.roles[name]
msg_text.append('Using English fallback for role "%s".'
% role)
except KeyError:
msg_text.append('Trying "%s" as canonical role name.'
% role)
# Should be an English name, but just in case:
canonical = name
if msg_text:
message = self.reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
else:
name = self.default_interpreted_role
canonical = None
try:
canonical = self.language.roles[name]
except AttributeError, error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (self.language, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (name, self.language.__name__))
if not canonical:
try:
return self.interpreted_roles[canonical], messages
canonical = _fallback_language_module.roles[name]
msg_text.append('Using English fallback for role "%s".'
% name)
except KeyError:
raise UnknownInterpretedRoleError(messages)
else:
return self.interpreted_roles[self.default_interpreted_role], []
msg_text.append('Trying "%s" as canonical role name.'
% name)
# Should be an English name, but just in case:
canonical = name
if msg_text:
message = self.reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
try:
return self.interpreted_roles[canonical], canonical, messages
except KeyError:
raise UnknownInterpretedRoleError(messages)
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
......@@ -936,26 +958,22 @@ class Inliner:
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subrefnode = inlines[0]
if isinstance(subrefnode, nodes.substitution_reference):
subreftext = subrefnode.astext()
refname = normalize_name(subreftext)
subrefnode['refname'] = refname
self.document.note_substitution_ref(
subrefnode)
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
referencenode = nodes.reference(
'|%s%s' % (subreftext, endstring), '')
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
referencenode['anonymous'] = 1
reference_node['anonymous'] = 1
self.document.note_anonymous_ref(
referencenode)
reference_node)
else:
referencenode['refname'] = refname
self.document.note_refname(
referencenode)
referencenode += subrefnode
inlines = [referencenode]
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
......@@ -965,6 +983,9 @@ class Inliner:
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
......@@ -986,10 +1007,9 @@ class Inliner:
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [refnode], string[matchend:], [])
if self.document.settings.trim_footnote_reference_space:
before = before.rstrip()
return (before, [refnode], remaining, [])
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
......@@ -1084,8 +1104,15 @@ class Inliner:
'_': reference,
'__': anonymous_reference}
def title_reference_role(self, role, rawtext, text, lineno):
    """Interpreted-text role handler: wrap `text` in a ``title_reference``
    node.  Returns ``(node list, system messages)`` like all role methods."""
    return [nodes.title_reference(rawtext, text)], []
def generic_interpreted_role(self, role, rawtext, text, lineno):
    """Handle any simple wrapping role: look up the node class registered
    for `role` in `self.generic_roles` and wrap `text` in it.  Unknown
    roles produce an error message and a problematic node instead."""
    role_class = self.generic_roles.get(role)
    if role_class is None:
        # Role not registered as a generic one: report and mark the text.
        msg = self.reporter.error('Unknown interpreted text role: "%s".'
                                  % role, line=lineno)
        prb = self.problematic(text, text, msg)
        return [prb], [msg]
    return [role_class(rawtext, text)], []
def pep_reference_role(self, role, rawtext, text, lineno):
try:
......@@ -1208,16 +1235,72 @@ class Body(RSTState):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
blockquote = self.block_quote(indented, line_offset)
blockquote, messages = self.block_quote(indented, line_offset)
self.parent += blockquote
self.parent += messages
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
    """Build a ``block_quote`` node from `indented` lines.

    Splits off a trailing "-- author" attribution (if present), parses the
    remaining lines as the quote body, then parses and appends the
    attribution.  Returns ``(block_quote node, system messages)``.
    """
    blockquote_lines, attribution_lines, attribution_offset = \
          self.check_attribution(indented, line_offset)
    blockquote = nodes.block_quote()
    self.nested_parse(blockquote_lines, line_offset, blockquote)
    messages = []
    if attribution_lines:
        # Attribution is parsed as inline text, not as a nested body.
        attribution, messages = self.parse_attribution(attribution_lines,
                                                       attribution_offset)
        blockquote += attribution
    return blockquote, messages
# Attribution opener: "--" not followed by another dash or newline,
# optional spaces, then at least one non-space character.
attribution_pattern = re.compile(r'--(?![-\n]) *(?=[^ \n])')

def check_attribution(self, indented, line_offset):
    """
    Check for an attribution in the last contiguous block of `indented`.

    * First line after last blank line must begin with "--" (etc.).
    * Every line after that must have consistent indentation.

    Return a 3-tuple: (block quote lines, attribution lines,
    attribution offset).
    """
    # NOTE(review): `indented` appears to be a docutils StringList (it
    # supports trim_left below) — confirm against callers.
    blank = None  # index of the blank line preceding the attribution
    nonblank_seen = None
    indent = 0
    # Scan bottom-up for a blank line whose next line opens with "--".
    for i in range(len(indented) - 1, 0, -1): # don't check first line
        this_line_blank = not indented[i].strip()
        if nonblank_seen and this_line_blank:
            match = self.attribution_pattern.match(indented[i + 1])
            if match:
                blank = i
                break
        elif not this_line_blank:
            nonblank_seen = 1
    if blank and len(indented) - blank > 2: # multi-line attribution
        # All continuation lines must share the second line's indentation.
        indent = (len(indented[blank + 2])
                  - len(indented[blank + 2].lstrip()))
        for j in range(blank + 3, len(indented)):
            if indent != (len(indented[j])
                          - len(indented[j].lstrip())): # bad shape
                blank = None
                break
    if blank:
        a_lines = indented[blank + 1:]
        # Strip the "--" marker from the first attribution line and the
        # shared indent from the continuation lines.
        a_lines.trim_left(match.end(), end=1)
        a_lines.trim_left(indent, start=1)
        return (indented[:blank], a_lines, line_offset + blank + 1)
    else:
        return (indented, None, None)
def parse_attribution(self, indented, line_offset):
    """Parse attribution lines into an ``attribution`` node.

    Joins `indented` into a single string, runs inline-markup parsing on
    it, and returns ``(attribution node, system messages)``.
    """
    text = '\n'.join(indented).rstrip()
    # Absolute line number of the attribution, for error reporting.
    lineno = self.state_machine.abs_line_number() + line_offset
    textnodes, messages = self.inline_text(text, lineno)
    node = nodes.attribution(text, '', *textnodes)
    node.line = lineno
    return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
......@@ -1436,8 +1519,9 @@ class Body(RSTState):
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
blockquote = self.block_quote(indented, line_offset)
blockquote, messages = self.block_quote(indented, line_offset)
self.parent += blockquote
self.parent += messages
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
......@@ -1689,6 +1773,7 @@ class Body(RSTState):
(?P=quote) # close quote if open quote used
)
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
......@@ -1864,34 +1949,31 @@ class Body(RSTState):
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
name = normalize_name(subname)
substitutionnode = nodes.substitution_definition(
blocktext, name=name, alt=subname)
substitutionnode.line = lineno
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.line = lineno
self.document.note_substitution_def(
substitution_node,subname, self.parent)
if block:
block[0] = block[0].strip()
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitutionnode,
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitutionnode[:]:
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitutionnode[i]
del substitutionnode[i]
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
if len(substitutionnode) == 0:
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.'
% subname,
nodes.literal_block(blocktext, blocktext), line=lineno)
return [msg], blank_finish
else:
del substitutionnode['alt']
self.document.note_substitution_def(
substitutionnode, self.parent)
return [substitutionnode], blank_finish
return [substitution_node], blank_finish
else:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
......@@ -2112,6 +2194,7 @@ class Body(RSTState):
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
......@@ -2147,7 +2230,8 @@ class Body(RSTState):
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish)
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
......@@ -2452,11 +2536,8 @@ class SubstitutionDef(Body):
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
if self.parent.has_key('alt'):
option_presets = {'alt': self.parent['alt']}
else:
option_presets = {}
nodelist, blank_finish = self.directive(match, **option_presets)
nodelist, blank_finish = self.directive(match,
alt=self.parent['name'])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
......@@ -2591,8 +2672,9 @@ class Text(RSTState):
self.state_machine.get_indented()
definitionlistitem = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
termlist, messages = self.term(
termline, self.state_machine.abs_line_number() - 1)
lineno = self.state_machine.abs_line_number() - 1
definitionlistitem.line = lineno
termlist, messages = self.term(termline, lineno)
definitionlistitem += termlist
definition = nodes.definition('', *messages)
definitionlistitem += definition
......@@ -2678,7 +2760,9 @@ class Line(SpecializedText):
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if len(marker) < 4:
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = 0
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
......@@ -2741,7 +2825,7 @@ class Line(SpecializedText):
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing underline for overline.',
'Missing matching underline for section title overline.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
......@@ -2819,8 +2903,13 @@ def escape2null(text):
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
    """
    Return a string with nulls removed or restored to backslashes.
    Backslash-escaped spaces are also removed.

    :Parameters:
        `text` : string with ``\\x00`` escape markers.
        `restore_backslashes` : if true, turn each marker back into a
            backslash instead of removing it.
    """
    if restore_backslashes:
        # Reinstate the escapes: every null marker becomes a backslash.
        return text.replace('\x00', '\\')
    else:
        # Remove null+space and null+newline first (escaped whitespace
        # disappears entirely), then any remaining bare null markers.
        for sep in ['\x00 ', '\x00\n', '\x00']:
            text = ''.join(text.split(sep))
        return text
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:07 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:41 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -131,7 +131,8 @@ class GridTableParser(TableParser):
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
self.block = list(block) # make a copy; it may be modified
self.block = block[:] # make a copy; it may be modified
self.block.disconnect() # don't propagate changes to parent
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
......@@ -165,7 +166,9 @@ class GridTableParser(TableParser):
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.get_cell_block(top, left, bottom, right)
cellblock = self.block.get_2D_block(top + 1, left + 1,
bottom, right)
cellblock.disconnect() # lines in cell can't sync with parent
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
......@@ -188,19 +191,6 @@ class GridTableParser(TableParser):
return None
return 1
def get_cell_block(self, top, left, bottom, right):
    """Given the corners, extract the text of a cell."""
    # Interior lines only (the border rows/columns are excluded),
    # each right-stripped of trailing whitespace.
    rows = [self.block[i][left + 1 : right].rstrip()
            for i in range(top + 1, bottom)]
    # Find the indentation common to all non-blank lines; `right`
    # serves as an "infinite" starting value.
    indent = right
    for row in rows:
        if row:
            indent = min(indent, len(row) - len(row.lstrip()))
    # Strip that common margin if there is one (but not everything).
    if 0 < indent < right:
        rows = [row[indent:] for row in rows]
    return rows
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
......@@ -278,7 +268,7 @@ class GridTableParser(TableParser):
def structure_from_cells(self):
"""
From the data colledted by `scan_cell()`, convert to the final data
From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = self.rowseps.keys() # list of row boundaries
......@@ -371,7 +361,8 @@ class SimpleTableParser(TableParser):
span_pat = re.compile('-[ -]*$')
def setup(self, block):
self.block = list(block) # make a copy; it will be modified
self.block = block[:] # make a copy; it will be modified
self.block.disconnect() # don't propagate changes to parent
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
......@@ -394,25 +385,26 @@ class SimpleTableParser(TableParser):
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
block = self.block[1:]
offset = 0
# Container for accumulating text lines until a row is complete:
rowlines = []
while block:
line = block.pop(0)
offset += 1
offset = 1 # skip top border
start = 1
text_found = None
while offset < len(self.block):
line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(rowlines, (line.rstrip(), offset))
rowlines = []
self.parse_row(self.block[start:offset], start,
(line.rstrip(), offset))
start = offset + 1
text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if rowlines:
self.parse_row(rowlines)
rowlines = [(line.rstrip(), offset)]
else:
# Accumulate lines of incomplete row.
rowlines.append((line.rstrip(), offset))
if text_found and offset != start:
self.parse_row(self.block[start:offset], start)
start = offset
text_found = 1
elif not text_found:
start = offset + 1
offset += 1
def parse_columns(self, line, offset):
"""
......@@ -448,12 +440,12 @@ class SimpleTableParser(TableParser):
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem at '
'line offset %s.' % offset)
cells.append((0, morecols, offset, []))
'line offset %s.' % (offset + 1))
cells.append([0, morecols, offset, []])
i += 1
return cells
def parse_row(self, lines, spanline=None):
def parse_row(self, lines, start, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
......@@ -462,20 +454,30 @@ class SimpleTableParser(TableParser):
text from each line, and check for text in column margins. Finally,
adjust for insignificant whitespace.
"""
while lines and not lines[-1][0]:
lines.pop() # Remove blank trailing lines.
if lines:
offset = lines[0][1]
elif spanline:
offset = spanline[1]
else:
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
row = self.init_row(columns, offset)
span_offset = start
self.check_columns(lines, start, columns)
row = self.init_row(columns, start)
for i in range(len(columns)):
start, end = columns[i]
cellblock = lines.get_2D_block(0, start, len(lines), end)
cellblock.disconnect() # lines in cell can't sync with parent
row[i][3] = cellblock
self.table.append(row)
def check_columns(self, lines, first_line, columns):
"""
Check for text in column margins and text overflow in the last column.
Raise TableMarkupError if anything but whitespace is in column margins.
Adjust the end value for the last column if there is text overflow.
"""
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
......@@ -483,30 +485,20 @@ class SimpleTableParser(TableParser):
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
block = []
margin = sys.maxint
for line, offset in lines:
offset = 0
for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
columns[lastcol] = (start, start + len(text))
self.adjust_last_column(start + len(text))
new_end = start + len(text)
columns[i] = (start, new_end)
main_start, main_end = self.columns[-1]
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin at line '
'offset %s.' % offset)
else:
text = line[start:end].rstrip()
block.append(text)
if text:
margin = min(margin, len(text) - len(text.lstrip()))
if 0 < margin < sys.maxint:
block = [line[margin:] for line in block]
row[i][3].extend(block)
self.table.append(row)
def adjust_last_column(self, new_end):
start, end = self.columns[-1]
if new_end > end:
self.columns[-1] = (start, new_end)
'offset %s.' % (first_line + offset))
offset += 1
columns.pop()
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
......
# Authors: David Goodger; Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:15 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:54 $
# Copyright: This module has been placed in the public domain.
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:15 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:54 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -11,10 +11,6 @@ Python Enhancement Proposal (PEP) Reader.
__docformat__ = 'reStructuredText'
import sys
import os
import re
from docutils import nodes
from docutils.readers import standalone
from docutils.transforms import peps, references
from docutils.parsers import rst
......@@ -23,7 +19,7 @@ from docutils.parsers import rst
class Inliner(rst.states.Inliner):
"""
Extend `rst.Inliner` to for local PEP references.
Extend `rst.Inliner` for local PEP references.
"""
pep_url = rst.states.Inliner.pep_url_local
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:58 $
# Copyright: This module has been placed in the public domain.
"""
This package contains the Python Source Reader modules.
"""
__docformat__ = 'reStructuredText'
import sys
import docutils.readers
class Reader(docutils.readers.Reader):
    # Placeholder: the Python Source Reader has no functionality yet; this
    # stub only gives the package a concrete Reader entry point.
    pass
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:48:20 $
# Copyright: This module has been placed in the public domain.
"""
Parser for Python modules.
The `parse_module()` function takes a module's text and file name, runs it
through the module parser (using compiler.py and tokenize.py) and produces a
"module documentation tree": a high-level AST full of nodes that are
interesting from an auto-documentation standpoint. For example, given this
module (x.py)::
# comment
'''Docstring'''
'''Additional docstring'''
__docformat__ = 'reStructuredText'
a = 1
'''Attribute docstring'''
class C(Super):
'''C's docstring'''
class_attribute = 1
'''class_attribute's docstring'''
def __init__(self, text=None):
'''__init__'s docstring'''
self.instance_attribute = (text * 7
+ ' whaddyaknow')
'''instance_attribute's docstring'''
def f(x, # parameter x
y=a*5, # parameter y
*args): # parameter args
'''f's docstring'''
return [x + item for item in args]
f.function_attribute = 1
'''f.function_attribute's docstring'''
The module parser will produce this module documentation tree::
<Module filename="test data">
<Comment lineno=1>
comment
<Docstring>
Docstring
<Docstring lineno="5">
Additional docstring
<Attribute lineno="7" name="__docformat__">
<Expression lineno="7">
'reStructuredText'
<Attribute lineno="9" name="a">
<Expression lineno="9">
1
<Docstring lineno="10">
Attribute docstring
<Class bases="Super" lineno="12" name="C">
<Docstring lineno="12">
C's docstring
<Attribute lineno="16" name="class_attribute">
<Expression lineno="16">
1
<Docstring lineno="17">
class_attribute's docstring
<Method lineno="19" name="__init__">
<Docstring lineno="19">
__init__'s docstring
<ParameterList lineno="19">
<Parameter lineno="19" name="self">
<Parameter lineno="19" name="text">
<Default lineno="19">
None
<Attribute lineno="22" name="self.instance_attribute">
<Expression lineno="22">
(text * 7 + ' whaddyaknow')
<Docstring lineno="24">
instance_attribute's docstring
<Function lineno="27" name="f">
<Docstring lineno="27">
f's docstring
<ParameterList lineno="27">
<Parameter lineno="27" name="x">
<Comment>
# parameter x
<Parameter lineno="27" name="y">
<Default lineno="27">
a * 5
<Comment>
# parameter y
<ExcessPositionalArguments lineno="27" name="args">
<Comment>
# parameter args
<Attribute lineno="33" name="f.function_attribute">
<Expression lineno="33">
1
<Docstring lineno="34">
f.function_attribute's docstring
(Comments are not implemented yet.)
compiler.parse() provides most of what's needed for this doctree, and
"tokenize" can be used to get the rest. We can determine the line number from
the compiler.parse() AST, and the TokenParser.rhs(lineno) method provides the
rest.
The Docutils Python reader component will transform this module doctree into a
Python-specific Docutils doctree, and then a `stylist transform`_ will
further transform it into a generic doctree. Namespaces will have to be
compiled for each of the scopes, but I'm not certain at what stage of
processing.
It's very important to keep all docstring processing out of this, so that it's
completely generic and not tool-specific.
> Why perform all of those transformations? Why not go from the AST to a
> generic doctree? Or, even from the AST to the final output?
I want the docutils.readers.python.moduleparser.parse_module() function to
produce a standard documentation-oriented tree that can be used by any tool.
We can develop it together without having to compromise on the rest of our
design (i.e., HappyDoc doesn't have to be made to work like Docutils, and
vice-versa). It would be a higher-level version of what compiler.py provides.
The Python reader component transforms this generic AST into a Python-specific
doctree (it knows about modules, classes, functions, etc.), but this is
specific to Docutils and cannot be used by HappyDoc or others. The stylist
transform does the final layout, converting Python-specific structures
("class" sections, etc.) into a generic doctree using primitives (tables,
sections, lists, etc.). This generic doctree does *not* know about Python
structures any more. The advantage is that this doctree can be handed off to
any of the output writers to create any output format we like.
The latter two transforms are separate because I want to be able to have
multiple independent layout styles (multiple runtime-selectable "stylist
transforms"). Each of the existing tools (HappyDoc, pydoc, epydoc, Crystal,
etc.) has its own fixed format. I personally don't like the tables-based
format produced by these tools, and I'd like to be able to customize the
format easily. That's the goal of stylist transforms, which are independent
from the Reader component itself. One stylist transform could produce
HappyDoc-like output, another could produce output similar to module docs in
the Python library reference manual, and so on.
It's for exactly this reason:
>> It's very important to keep all docstring processing out of this, so that
>> it's completely generic and not tool-specific.
... but it goes past docstring processing. It's also important to keep style
decisions and tool-specific data transforms out of this module parser.
Issues
======
* At what point should namespaces be computed? Should they be part of the
basic AST produced by the ASTVisitor walk, or generated by another tree
traversal?
* At what point should a distinction be made between local variables &
instance attributes in __init__ methods?
* Docstrings are getting their lineno from their parents. Should the
TokenParser find the real line no's?
* Comments: include them? How and when? Only full-line comments, or
parameter comments too? (See function "f" above for an example.)
* Module could use more docstrings & refactoring in places.
"""
__docformat__ = 'reStructuredText'
import sys
import compiler
import compiler.ast
import tokenize
import token
from compiler.consts import OP_ASSIGN
from compiler.visitor import ASTVisitor
from types import StringType, UnicodeType, TupleType
def parse_module(module_text, filename):
    """Return a module documentation tree from `module_text`."""
    # Build a compiler.py AST, then walk it with a visitor that also
    # consults the raw token stream (for source text the AST discards).
    ast = compiler.parse(module_text)
    token_parser = TokenParser(module_text)
    visitor = ModuleVisitor(filename, token_parser)
    # Pass the visitor as its own walker so its default()/default_visit()
    # dispatch replaces compiler.visitor's default traversal.
    compiler.walk(ast, visitor, walker=visitor)
    return visitor.module
class Node:

    """
    Base class for module documentation tree nodes.
    """

    def __init__(self, node):
        # List of child nodes.
        self.children = []
        # Line number of this node (or ``None``), from the compiler AST node.
        self.lineno = node.lineno

    def __str__(self, indent=' ', level=0):
        # This node's repr on its own line, then each child one level deeper.
        return ''.join(['%s%s\n' % (indent * level, repr(self))] +
                       [child.__str__(indent, level + 1)
                        for child in self.children])

    def __repr__(self):
        parts = [self.__class__.__name__]
        for name, value in self.attlist():
            parts.append('%s="%s"' % (name, value))
        return '<%s>' % ' '.join(parts)

    def attlist(self, **atts):
        """Return this node's attributes as name-sorted (name, value) pairs."""
        if self.lineno is not None:
            atts['lineno'] = self.lineno
        # sorted() instead of the Python-2-only ``items = d.items();
        # items.sort()`` pattern: dict.items() is a view, not a list, on
        # modern interpreters.  Result is identical.
        return sorted(atts.items())

    def append(self, node):
        """Add a single child node."""
        self.children.append(node)

    def extend(self, node_list):
        """Add several child nodes at once."""
        self.children.extend(node_list)
class TextNode(Node):

    # A Node carrying a block of text (trimmed like a docstring).

    def __init__(self, node, text):
        Node.__init__(self, node)
        # Stored with indentation/blank-line trimming already applied.
        self.text = trim_docstring(text)

    def __str__(self, indent=' ', level=0):
        # Node header first, then the text indented one level deeper.
        prefix = indent * (level + 1)
        text = '\n'.join([prefix + line for line in self.text.splitlines()])
        return Node.__str__(self, indent, level) + text + '\n'
class Module(Node):

    # Root of the module documentation tree; records the source file name.

    def __init__(self, node, filename):
        Node.__init__(self, node)
        self.filename = filename

    def attlist(self):
        return Node.attlist(self, filename=self.filename)
class Docstring(TextNode): pass
class Comment(TextNode): pass
class Import(Node):

    # An ``import`` or ``from ... import`` statement.
    # NOTE(review): ``as`` is used as a plain identifier below; that is only
    # legal in Python < 2.6, before ``as`` became a reserved word.

    def __init__(self, node, names, from_name=None):
        Node.__init__(self, node)
        self.names = names          # list of (name, alias-or-None) pairs
        self.from_name = from_name  # module name for "from X import ..."

    def __str__(self, indent=' ', level=0):
        # Render one "name" or "name as alias" per line under the header.
        prefix = indent * (level + 1)
        lines = []
        for name, as in self.names:
            if as:
                lines.append('%s%s as %s' % (prefix, name, as))
            else:
                lines.append('%s%s' % (prefix, name))
        text = '\n'.join(lines)
        return Node.__str__(self, indent, level) + text + '\n'

    def attlist(self):
        if self.from_name:
            atts = {'from': self.from_name}
        else:
            atts = {}
        return Node.attlist(self, **atts)
class Attribute(Node):

    # A named assignment target (e.g. ``a = ...`` or ``self.x = ...``).

    def __init__(self, node, name):
        Node.__init__(self, node)
        self.name = name

    def attlist(self):
        return Node.attlist(self, name=self.name)
class AttributeTuple(Node):

    # A tuple-unpacking assignment target, e.g. ``a, b = ...``.

    def __init__(self, node, names):
        Node.__init__(self, node)
        self.names = names

    def attlist(self):
        # All member names joined into a single space-separated attribute.
        return Node.attlist(self, names=' '.join(self.names))
class Expression(TextNode):

    # The recovered source text of a right-hand-side expression.

    def __str__(self, indent=' ', level=0):
        # Single-line render; unicode-escape keeps non-ASCII printable.
        # NOTE(review): written for Python 2 strings — on Python 3
        # str.encode() would yield bytes here.
        prefix = indent * (level + 1)
        return '%s%s%s\n' % (Node.__str__(self, indent, level),
                             prefix, self.text.encode('unicode-escape'))
class Function(Attribute): pass
class ParameterList(Node): pass
class Parameter(Attribute): pass
class ParameterTuple(AttributeTuple):
    # A tuple-unpacking parameter, e.g. ``def f(a, (b, c)):`` (Python 2).
    def attlist(self):
        return Node.attlist(self, names=normalize_parameter_name(self.names))
class ExcessPositionalArguments(Parameter): pass
class ExcessKeywordArguments(Parameter): pass
class Default(Expression): pass
class Class(Node):

    # A class definition: name, base-class names, docstring and members.

    def __init__(self, node, name, bases=None):
        Node.__init__(self, node)
        self.name = name
        self.bases = bases or []

    def attlist(self):
        atts = {'name': self.name}
        if self.bases:
            atts['bases'] = ' '.join(self.bases)
        return Node.attlist(self, **atts)
class Method(Function): pass
class BaseVisitor(ASTVisitor):

    # Common state for all AST visitors in this module.

    def __init__(self, token_parser):
        ASTVisitor.__init__(self)
        self.token_parser = token_parser
        # Stack of container nodes currently being filled.
        self.context = []
        # The node a following string constant would document, or None.
        self.documentable = None

    def default(self, node, *args):
        # Unhandled AST node: whatever follows it cannot be a docstring.
        self.documentable = None
        #print 'in default (%s)' % node.__class__.__name__
        #ASTVisitor.default(self, node, *args)

    def default_visit(self, node, *args):
        # Explicit hook for subclasses that do want default traversal.
        #print 'in default_visit (%s)' % node.__class__.__name__
        ASTVisitor.default(self, node, *args)
class DocstringVisitor(BaseVisitor):

    # Attaches standalone string constants ("additional docstrings") to the
    # current `documentable` node.

    def visitDiscard(self, node):
        # An expression statement; only relevant while something is
        # eligible to receive a docstring.
        if self.documentable:
            self.visit(node.expr)

    def visitConst(self, node):
        if self.documentable:
            if type(node.value) in (StringType, UnicodeType):
                self.documentable.append(Docstring(node, node.value))
            else:
                # A non-string constant ends the docstring context.
                self.documentable = None

    def visitStmt(self, node):
        self.default_visit(node)
class AssignmentVisitor(DocstringVisitor):

    # Turns assignment statements into Attribute nodes via AttributeVisitor.

    def visitAssign(self, node):
        visitor = AttributeVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        if visitor.attributes:
            self.context[-1].extend(visitor.attributes)
        if len(visitor.attributes) == 1:
            # Exactly one target: a following string may document it.
            self.documentable = visitor.attributes[0]
        else:
            self.documentable = None
class ModuleVisitor(AssignmentVisitor):

    # Top-level visitor: builds the Module node and delegates functions and
    # classes to their specialized visitors.

    def __init__(self, filename, token_parser):
        AssignmentVisitor.__init__(self, token_parser)
        self.filename = filename
        self.module = None  # set by visitModule; the finished tree root

    def visitModule(self, node):
        self.module = module = Module(node, self.filename)
        if node.doc is not None:
            module.append(Docstring(node, node.doc))
        self.context.append(module)
        self.documentable = module
        self.visit(node.node)
        self.context.pop()

    def visitImport(self, node):
        self.context[-1].append(Import(node, node.names))
        self.documentable = None

    def visitFrom(self, node):
        self.context[-1].append(
            Import(node, node.names, from_name=node.modname))
        self.documentable = None

    def visitFunction(self, node):
        # Walk the whole function subtree with its own visitor.
        visitor = FunctionVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        self.context[-1].append(visitor.function)

    def visitClass(self, node):
        # Walk the whole class subtree with its own visitor.
        visitor = ClassVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        self.context[-1].append(visitor.klass)
class AttributeVisitor(BaseVisitor):

    # Collects the target(s) of one assignment statement as Attribute /
    # AttributeTuple nodes, each carrying the RHS as an Expression child.

    def __init__(self, token_parser):
        BaseVisitor.__init__(self, token_parser)
        self.attributes = []

    def visitAssign(self, node):
        # Don't visit the expression itself, just the attribute nodes:
        for child in node.nodes:
            self.dispatch(child)
        # Recover the RHS source text from the token stream (the AST does
        # not preserve source text) and attach it to every target.
        expression_text = self.token_parser.rhs(node.lineno)
        expression = Expression(node, expression_text)
        for attribute in self.attributes:
            attribute.append(expression)

    def visitAssName(self, node):
        self.attributes.append(Attribute(node, node.name))

    def visitAssTuple(self, node):
        # Gather the tuple's member names into a fresh list, then collapse
        # them into one AttributeTuple on the original list.
        attributes = self.attributes
        self.attributes = []
        self.default_visit(node)
        names = [attribute.name for attribute in self.attributes]
        att_tuple = AttributeTuple(node, names)
        att_tuple.lineno = self.attributes[0].lineno
        self.attributes = attributes
        self.attributes.append(att_tuple)

    def visitAssAttr(self, node):
        self.default_visit(node, node.attrname)

    def visitGetattr(self, node, suffix):
        # Build dotted names right-to-left ("c" -> "b.c" -> "a.b.c").
        self.default_visit(node, node.attrname + '.' + suffix)

    def visitName(self, node, suffix):
        self.attributes.append(Attribute(node, node.name + '.' + suffix))
class FunctionVisitor(DocstringVisitor):

    # Builds a Function node (docstring + parameter list) for one "def".

    in_function = 0
    function_class = Function  # node class to instantiate (Method in subclass)

    def visitFunction(self, node):
        if self.in_function:
            self.documentable = None
            # Don't bother with nested function definitions.
            return
        self.in_function = 1
        self.function = function = self.function_class(node, node.name)
        if node.doc is not None:
            function.append(Docstring(node, node.doc))
        self.context.append(function)
        self.documentable = function
        self.parse_parameter_list(node)
        self.visit(node.code)
        self.context.pop()

    def parse_parameter_list(self, node):
        parameters = []
        special = []
        argnames = list(node.argnames)
        # *args / **kwargs occupy the tail of argnames; peel them off
        # (kwargs last, then varargs).
        if node.kwargs:
            special.append(ExcessKeywordArguments(node, argnames[-1]))
            argnames.pop()
        if node.varargs:
            special.append(ExcessPositionalArguments(node, argnames[-1]))
            argnames.pop()
        # Left-pad defaults with None so they align with argnames.
        defaults = list(node.defaults)
        defaults = [None] * (len(argnames) - len(defaults)) + defaults
        # Default-value *source text* comes from the token stream.
        function_parameters = self.token_parser.function_parameters(
            node.lineno)
        #print >>sys.stderr, function_parameters
        for argname, default in zip(argnames, defaults):
            if type(argname) is TupleType:
                parameter = ParameterTuple(node, argname)
                argname = normalize_parameter_name(argname)
            else:
                parameter = Parameter(node, argname)
            if default:
                parameter.append(Default(node, function_parameters[argname]))
            parameters.append(parameter)
        if parameters or special:
            # Restore *args-before-**kwargs order for the specials.
            special.reverse()
            parameters.extend(special)
            parameter_list = ParameterList(node)
            parameter_list.extend(parameters)
            self.function.append(parameter_list)
class ClassVisitor(AssignmentVisitor):

    # Builds a Class node: base-class names, docstring, attributes, methods.

    in_class = 0

    def __init__(self, token_parser):
        AssignmentVisitor.__init__(self, token_parser)
        self.bases = []

    def visitClass(self, node):
        if self.in_class:
            self.documentable = None
            # Don't bother with nested class definitions.
            return
        self.in_class = 1
        #import mypdb as pdb
        #pdb.set_trace()
        # Visit base-class expressions first to collect their dotted names.
        for base in node.bases:
            self.visit(base)
        self.klass = klass = Class(node, node.name, self.bases)
        if node.doc is not None:
            klass.append(Docstring(node, node.doc))
        self.context.append(klass)
        self.documentable = klass
        self.visit(node.code)
        self.context.pop()

    def visitGetattr(self, node, suffix=None):
        # Accumulate dotted base-class names (e.g. "module.Base").
        if suffix:
            name = node.attrname + '.' + suffix
        else:
            name = node.attrname
        self.default_visit(node, name)

    def visitName(self, node, suffix=None):
        if suffix:
            name = node.name + '.' + suffix
        else:
            name = node.name
        self.bases.append(name)

    def visitFunction(self, node):
        # __init__ gets the combined visitor so instance attributes
        # assigned inside it are documented too.
        if node.name == '__init__':
            visitor = InitMethodVisitor(self.token_parser)
        else:
            visitor = MethodVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        self.context[-1].append(visitor.function)
class MethodVisitor(FunctionVisitor):
    # Like FunctionVisitor, but produces Method nodes.
    function_class = Method
class InitMethodVisitor(MethodVisitor, AssignmentVisitor): pass
class TokenParser:

    """
    Scans a module's token stream to recover source text (assignment
    right-hand sides, parameter defaults) that the compiler AST discards.
    """

    def __init__(self, text):
        # Pad with newlines so the tokenizer always terminates cleanly.
        self.text = text + '\n\n'
        self.lines = self.text.splitlines(1)
        self.generator = tokenize.generate_tokens(iter(self.lines).next)
        self.next()

    def __iter__(self):
        return self

    def next(self):
        # Advance one token; mirror its fields as instance attributes.
        self.token = self.generator.next()
        self.type, self.string, self.start, self.end, self.line = self.token
        return self.token

    def goto_line(self, lineno):
        # Skip forward until the current token starts at/after `lineno`.
        while self.start[0] < lineno:
            self.next()
        # NOTE(review): this returns the ``token`` *module*, not
        # ``self.token``; callers ignore the return value, so it's benign.
        return token

    def rhs(self, lineno):
        """
        Return a whitespace-normalized expression string from the right-hand
        side of an assignment at line `lineno`.
        """
        self.goto_line(lineno)
        while self.string != '=':
            self.next()
        self.stack = None
        while self.type != token.NEWLINE and self.string != ';':
            if self.string == '=' and not self.stack:
                # (Re)start collection at each top-level '=' so only the
                # final RHS of a chained assignment (a = b = expr) is kept.
                self.tokens = []
                self.stack = []
                self._type = None
                self._string = None
                self._backquote = 0
            else:
                self.note_token()
            self.next()
        self.next()
        text = ''.join(self.tokens)
        return text.strip()

    # Bracket bookkeeping and whitespace-normalization tables:
    closers = {')': '(', ']': '[', '}': '{'}
    openers = {'(': 1, '[': 1, '{': 1}
    del_ws_prefix = {'.': 1, '=': 1, ')': 1, ']': 1, '}': 1, ':': 1, ',': 1}
    no_ws_suffix = {'.': 1, '=': 1, '(': 1, '[': 1, '{': 1}

    def note_token(self):
        # Append the current token's text to self.tokens with normalized
        # single-space separation, tracking bracket nesting in self.stack.
        if self.type == tokenize.NL:
            return
        del_ws = self.del_ws_prefix.has_key(self.string)
        append_ws = not self.no_ws_suffix.has_key(self.string)
        if self.openers.has_key(self.string):
            self.stack.append(self.string)
            if (self._type == token.NAME
                or self.closers.has_key(self._string)):
                # Call/subscript: no space between a name/closer and opener.
                del_ws = 1
        elif self.closers.has_key(self.string):
            assert self.stack[-1] == self.closers[self.string]
            self.stack.pop()
        elif self.string == '`':
            # Python 2 backquote (repr) syntax; toggles open/close state.
            if self._backquote:
                del_ws = 1
                assert self.stack[-1] == '`'
                self.stack.pop()
            else:
                append_ws = 0
                self.stack.append('`')
            self._backquote = not self._backquote
        if del_ws and self.tokens and self.tokens[-1] == ' ':
            del self.tokens[-1]
        self.tokens.append(self.string)
        self._type = self.type
        self._string = self.string
        if append_ws:
            self.tokens.append(' ')

    def function_parameters(self, lineno):
        """
        Return a dictionary mapping parameters to defaults
        (whitespace-normalized strings).
        """
        self.goto_line(lineno)
        while self.string != 'def':
            self.next()
        while self.string != '(':
            self.next()
        name = None
        default = None
        parameter_tuple = None
        self.tokens = []
        parameters = {}
        self.stack = [self.string]
        self.next()
        while 1:
            if len(self.stack) == 1:
                # At the parameter list's top nesting level.
                if parameter_tuple:
                    # Just encountered ")".
                    #print >>sys.stderr, 'parameter_tuple: %r' % self.tokens
                    name = ''.join(self.tokens).strip()
                    self.tokens = []
                    parameter_tuple = None
                if self.string in (')', ','):
                    # End of one parameter (or of the whole list).
                    if name:
                        if self.tokens:
                            default_text = ''.join(self.tokens).strip()
                        else:
                            default_text = None
                        parameters[name] = default_text
                        self.tokens = []
                        name = None
                        default = None
                    if self.string == ')':
                        break
                elif self.type == token.NAME:
                    if name and default:
                        # Part of a default-value expression.
                        self.note_token()
                    else:
                        assert name is None, (
                            'token=%r name=%r parameters=%r stack=%r'
                            % (self.token, name, parameters, self.stack))
                        name = self.string
                        #print >>sys.stderr, 'name=%r' % name
                elif self.string == '=':
                    assert name is not None, 'token=%r' % (self.token,)
                    assert default is None, 'token=%r' % (self.token,)
                    assert self.tokens == [], 'token=%r' % (self.token,)
                    default = 1
                    self._type = None
                    self._string = None
                    self._backquote = 0
                elif name:
                    self.note_token()
                elif self.string == '(':
                    # Start of a tuple parameter.
                    parameter_tuple = 1
                    self._type = None
                    self._string = None
                    self._backquote = 0
                    self.note_token()
                else: # ignore these tokens:
                    assert (self.string in ('*', '**', '\n')
                            or self.type == tokenize.COMMENT), (
                        'token=%r' % (self.token,))
            else:
                # Inside nested brackets: everything belongs to the
                # current default (or tuple-parameter) text.
                self.note_token()
            self.next()
        return parameters
def trim_docstring(text):
    """
    Trim indentation and blank lines from docstring text & return it.

    See PEP 257.
    """
    if not text:
        return text
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = text.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count).
    # ``None`` means "no indented line seen yet"; the original used
    # sys.maxint as the sentinel, which no longer exists on Python 3.
    indent = None
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            width = len(line) - len(stripped)
            if indent is None or width < indent:
                indent = width
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent is not None:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)
def normalize_parameter_name(name):
    """
    Convert a tuple like ``('a', ('b', 'c'), 'd')`` into ``'(a, (b, c), d)'``.

    A plain string name is returned unchanged.
    """
    # ``tuple`` replaces the original ``types.TupleType`` alias: they are
    # the same object on Python 2, and ``types.TupleType`` was removed in
    # Python 3, so this keeps the check portable with identical behavior.
    if type(name) is tuple:
        return '(%s)' % ', '.join([normalize_parameter_name(n)
                                   for n in name])
    else:
        return name
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:15 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:54 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -25,6 +25,19 @@ class Reader(readers.Reader):
document = None
"""A single document tree."""
settings_spec = (
'Standalone Reader',
None,
(('Disable the promotion of a lone top-level section title to '
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
{'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
{'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1}),))
default_transforms = (references.Substitutions,
frontmatter.DocTitle,
frontmatter.DocInfo,
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:00 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:30 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -266,7 +266,8 @@ class StateMachine:
transitions = None
state = self.get_state(next_state)
except:
self.error()
if self.debug:
self.error()
raise
self.observers = []
return results
......@@ -1294,11 +1295,11 @@ class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def strip_indent(self, length, start=0, end=sys.maxint):
def trim_left(self, length, start=0, end=sys.maxint):
"""
Strip `length` characters off the beginning of each item, in-place,
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
stripped text. Does not affect slice parent.
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
......@@ -1381,9 +1382,20 @@ class StringList(ViewList):
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.strip_indent(indent, start=(first_indent is not None))
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
def get_2D_block(self, top, left, bottom, right, strip_indent=1):
    # Extract the rectangle rows [top:bottom) x columns [left:right) as a
    # new list, right-stripping each line and (by default) removing any
    # indentation common to all non-blank lines.
    block = self[top:bottom]
    # `right` serves as an "infinite" starting value for the indent scan.
    indent = right
    for i in range(len(block.data)):
        block.data[i] = line = block.data[i][left:right].rstrip()
        if line:
            indent = min(indent, len(line) - len(line.lstrip()))
    if strip_indent and 0 < indent < right:
        block.data = [line[indent:] for line in block.data]
    return block
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
......
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -105,6 +105,8 @@ class DocTitle(Transform):
default_priority = 320
def apply(self):
if not getattr(self.document.settings, 'doctitle_xform', 1):
return
if self.promote_document_title():
self.promote_document_subtitle()
......@@ -229,7 +231,25 @@ class DocInfo(Transform):
default_priority = 340
biblio_nodes = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Canonical field name (lowcased) to node class name mapping for
bibliographic fields (field_list)."""
def apply(self):
if not getattr(self.document.settings, 'docinfo_xform', 1):
return
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
......@@ -252,38 +272,37 @@ class DocInfo(Transform):
for field in field_list:
try:
name = field[0][0].astext()
normedname = utils.normalize_name(name)
normedname = nodes.fully_normalize_name(name)
if not (len(field) == 2 and bibliofields.has_key(normedname)
and self.check_empty_biblio_field(field, name)):
raise TransformError
biblioclass = bibliofields[normedname]
canonical = bibliofields[normedname]
biblioclass = self.biblio_nodes[canonical]
if issubclass(biblioclass, nodes.TextElement):
if not self.check_compound_biblio_field(field, name):
raise TransformError
utils.clean_rcs_keywords(
field[1][0], self.rcs_keyword_substitutions)
docinfo.append(biblioclass('', '', *field[1][0]))
else: # multiple body elements possible
if issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[normedname]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[normedname])
topics[normedname] = biblioclass(
'', title, CLASS=normedname, *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
elif issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[canonical]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[canonical])
topics[canonical] = biblioclass(
'', title, CLASS=canonical, *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
if len(field[-1]) == 1 \
and isinstance(field[-1][0], nodes.paragraph):
utils.clean_rcs_keywords(
field[-1][0], self.rcs_keyword_substitutions)
docinfo.append(field)
continue
nodelist = []
if len(docinfo) != 0:
nodelist.append(docinfo)
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -10,6 +10,7 @@ Miscellaneous transforms.
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.transforms import Transform, TransformError
......@@ -31,3 +32,31 @@ class CallBack(Transform):
pending = self.startnode
pending.details['callback'](pending)
pending.parent.remove(pending)
class ClassAttribute(Transform):
    """
    Set the "class" attribute (from a pending element's details) on the
    first suitable element that follows the pending node in the tree.
    """

    # Run early so the class attribute is in place before most other
    # transforms see the target element.
    default_priority = 210

    def apply(self):
        pending = self.startnode
        class_value = pending.details['class']
        parent = pending.parent
        child = pending
        while parent:
            # Scan the siblings that follow `child` within `parent`.
            for index in range(parent.index(child) + 1, len(parent)):
                element = parent[index]
                if isinstance(element, nodes.comment):
                    continue  # comments are transparent; keep looking
                element.set_class(class_value)
                pending.parent.remove(pending)
                return
            else:
                # for/else: no suitable sibling at this level; move up
                # one level and continue after the enclosing element.
                child = parent
                parent = parent.parent
        # Walked off the top of the tree without finding a target.
        error = self.document.reporter.error(
            'No suitable element following "%s" directive'
            % pending.details['directive'],
            nodes.literal_block(pending.rawsource, pending.rawsource),
            line=pending.line)
        pending.parent.replace(pending, error)
# Authors: David Goodger, Ueli Schlaepfer, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -73,8 +73,11 @@ class Contents(Transform):
def apply(self):
topic = nodes.topic(CLASS='contents')
title = self.startnode.details['title']
if self.startnode.details.has_key('local'):
details = self.startnode.details
if details.has_key('class'):
topic.set_class(details['class'])
title = details['title']
if details.has_key('local'):
startnode = self.startnode.parent
# @@@ generate an error if the startnode (directive) not at
# section/document top-level? Drag it up until it is?
......@@ -89,13 +92,13 @@ class Contents(Transform):
topic += title
else:
name = self.language.labels['contents']
name = utils.normalize_name(name)
name = nodes.fully_normalize_name(name)
if not self.document.has_name(name):
topic['name'] = name
self.document.note_implicit_target(topic)
self.toc_id = topic['id']
if self.startnode.details.has_key('backlinks'):
self.backlinks = self.startnode.details['backlinks']
if details.has_key('backlinks'):
self.backlinks = details['backlinks']
else:
self.backlinks = self.document.settings.toc_backlinks
contents = self.build_contents(startnode)
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -668,11 +668,17 @@ class Substitutions(Transform):
def apply(self):
defs = self.document.substitution_defs
normed = self.document.substitution_names
for refname, refs in self.document.substitution_refs.items():
for ref in refs:
key = None
if defs.has_key(refname):
ref.parent.replace(ref, defs[refname].get_children())
key = refname
else:
normed_name = refname.lower()
if normed.has_key(normed_name):
key = normed[normed_name]
if key is None:
msg = self.document.reporter.error(
'Undefined substitution referenced: "%s".'
% refname, base_node=ref)
......@@ -682,6 +688,8 @@ class Substitutions(Transform):
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
else:
ref.parent.replace(ref, defs[key].get_children())
self.document.substitution_refs = None # release replaced references
......@@ -747,6 +755,8 @@ class TargetNotes(Transform):
self.document.note_autofootnote_ref(refnode)
self.document.note_footnote_ref(refnode)
index = ref.parent.index(ref) + 1
reflist = [nodes.Text(' '), refnode]
reflist = [refnode]
if not self.document.settings.trim_footnote_reference_space:
reflist.insert(0, nodes.Text(' '))
ref.parent.insert(index, reflist)
return footnote
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:17 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -102,7 +102,7 @@ class Messages(Transform):
for msg in unfiltered:
if msg['level'] >= threshold and not msg.parent:
messages.append(msg)
if len(messages) > 0:
if messages:
section = nodes.section(CLASS='system-messages')
# @@@ get this from the language module?
section += nodes.title('', 'Docutils System Messages')
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:00 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:30 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -20,8 +20,9 @@ from docutils import frontend, nodes
class SystemMessage(ApplicationError):
def __init__(self, system_message):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class Reporter:
......@@ -75,7 +76,7 @@ class Reporter:
"""List of names for system message levels, indexed by level."""
def __init__(self, source, report_level, halt_level, stream=None,
debug=0):
debug=0, encoding='ascii', error_handler='replace'):
"""
Initialize the `ConditionSet` for the `Reporter`'s default category.
......@@ -90,6 +91,8 @@ class Reporter:
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing), or
`None` (implies `sys.stderr`; default).
- `encoding`: The encoding for stderr output.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
......@@ -99,6 +102,12 @@ class Reporter:
elif type(stream) in (StringType, UnicodeType):
raise NotImplementedError('This should open a file for writing.')
self.encoding = encoding
"""The character encoding for the stderr output."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.categories = {'': ConditionSet(debug, report_level, halt_level,
stream)}
"""Mapping of category names to conditions. Default category is ''."""
......@@ -107,6 +116,9 @@ class Reporter:
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
if stream is None:
......@@ -164,14 +176,16 @@ class Reporter:
*children, **attributes)
debug, report_level, halt_level, stream = self[category].astuple()
if level >= report_level or debug and level == 0:
msgtext = msg.astext().encode(self.encoding, self.error_handler)
if category:
print >>stream, msg.astext(), '[%s]' % category
print >>stream, msgtext, '[%s]' % category
else:
print >>stream, msg.astext()
print >>stream, msgtext
if level >= halt_level:
raise SystemMessage(msg)
raise SystemMessage(msg, level)
if level > 0 or debug:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
......@@ -368,10 +382,6 @@ def extract_name_value(line):
attlist.append((attname.lower(), data))
return attlist
def normalize_name(name):
"""Return a case- and whitespace-normalized name."""
return ' '.join(name.lower().split())
def new_document(source, settings=None):
"""
Return a new empty document object.
......@@ -385,7 +395,9 @@ def new_document(source, settings=None):
if settings is None:
settings = frontend.OptionParser().get_default_values()
reporter = Reporter(source, settings.report_level, settings.halt_level,
settings.warning_stream, settings.debug)
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source)
document.note_source(source, -1)
return document
......@@ -401,7 +413,7 @@ def clean_rcs_keywords(paragraph, keyword_substitutions):
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source`.
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
......@@ -426,7 +438,7 @@ def relative_path(source, target):
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
it's closest ancestor.
its closest ancestor.
"""
while node:
if node.source or node.line:
......
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:20 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -68,6 +68,7 @@ class Writer(Component):
_writer_aliases = {
'html': 'html4css1',
'latex': 'latex2e',
'pprint': 'pseudoxml',
'pformat': 'pseudoxml',
'pdf': 'rlpdf',
......
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:20 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
......
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:20 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -18,6 +18,7 @@ __docformat__ = 'reStructuredText'
import sys
import os
import os.path
import time
import re
from types import ListType
......@@ -57,7 +58,12 @@ class Writer(writers.Writer):
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<FORMAT>'}),
'metavar': '<format>'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of bullet lists '
'and enumerated lists, when list items are "simple" (i.e., all '
'items each contain one paragraph and/or one "simple" sublist '
......@@ -66,7 +72,10 @@ class Writer(writers.Writer):
{'default': 1, 'action': 'store_true'}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),))
{'dest': 'compact_lists', 'action': 'store_false'}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'], {'dest': 'xml_declaration', 'default': 1,
'action': 'store_false'}),))
relative_path_settings = ('stylesheet_path',)
......@@ -157,14 +166,17 @@ class HTMLTranslator(nodes.NodeVisitor):
lcode = settings.language_code
self.language = languages.get_language(lcode)
self.head_prefix = [
self.xml_declaration % settings.output_encoding,
self.doctype,
self.html_head % (lcode, lcode),
self.content_type % settings.output_encoding,
self.generator % docutils.__version__]
if settings.xml_declaration:
self.head_prefix.insert(0, self.xml_declaration
% settings.output_encoding)
self.head = []
if settings.embed_stylesheet:
stylesheet = self.get_stylesheet_reference(os.getcwd())
stylesheet = self.get_stylesheet_reference(
os.path.join(os.getcwd(), 'dummy'))
stylesheet_text = open(stylesheet).read()
self.stylesheet = [self.embedded_stylesheet % stylesheet_text]
else:
......@@ -185,6 +197,7 @@ class HTMLTranslator(nodes.NodeVisitor):
self.compact_p = 1
self.compact_simple = None
self.in_docinfo = None
self.in_sidebar = None
def get_stylesheet_reference(self, relative_to=None):
settings = self.settings
......@@ -196,9 +209,10 @@ class HTMLTranslator(nodes.NodeVisitor):
return settings.stylesheet
def astext(self):
return ''.join(self.head_prefix + self.head + self.stylesheet
+ self.body_prefix + self.body_pre_docinfo
+ self.docinfo + self.body + self.body_suffix)
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
+ self.body_pre_docinfo + self.docinfo
+ self.body + self.body_suffix)
def encode(self, text):
"""Encode special characters in `text` & return."""
......@@ -243,12 +257,12 @@ class HTMLTranslator(nodes.NodeVisitor):
# (But the XHTML (XML) spec says the opposite. <sigh>)
parts.append(name.lower())
elif isinstance(value, ListType):
values = [str(v) for v in value]
values = [unicode(v) for v in value]
parts.append('%s="%s"' % (name.lower(),
self.attval(' '.join(values))))
else:
parts.append('%s="%s"' % (name.lower(),
self.attval(str(value))))
self.attval(unicode(value))))
return '<%s%s>%s' % (' '.join(parts), infix, suffix)
def emptytag(self, node, tagname, suffix='\n', **attributes):
......@@ -261,6 +275,20 @@ class HTMLTranslator(nodes.NodeVisitor):
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'abbr', ''))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_acronym(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'acronym', ''))
def depart_acronym(self, node):
self.body.append('</acronym>')
def visit_address(self, node):
self.visit_docinfo_item(node, 'address', meta=None)
self.body.append(self.starttag(node, 'pre', CLASS='address'))
......@@ -269,12 +297,14 @@ class HTMLTranslator(nodes.NodeVisitor):
self.body.append('\n</pre>\n')
self.depart_docinfo_item()
def visit_admonition(self, node, name):
self.body.append(self.starttag(node, 'div', CLASS=name))
self.body.append('<p class="admonition-title">'
+ self.language.labels[name] + '</p>\n')
def visit_admonition(self, node, name=''):
self.body.append(self.starttag(node, 'div',
CLASS=(name or 'admonition')))
if name:
self.body.append('<p class="admonition-title">'
+ self.language.labels[name] + '</p>\n')
def depart_admonition(self):
def depart_admonition(self, node=None):
self.body.append('</div>\n')
def visit_attention(self, node):
......@@ -283,6 +313,20 @@ class HTMLTranslator(nodes.NodeVisitor):
def depart_attention(self, node):
self.depart_admonition()
# Prefix/suffix pairs wrapping a block-quote attribution, keyed by the
# value of the "--attribution" setting.
attribution_formats = {'dash': ('&mdash;', ''),
                       'parentheses': ('(', ')'),
                       'parens': ('(', ')'),
                       'none': ('', '')}

def visit_attribution(self, node):
    # Emit the configured prefix now; stash the matching suffix on the
    # context stack for depart_attribution() to pop.
    prefix, suffix = self.attribution_formats[self.settings.attribution]
    self.context.append(suffix)
    self.body.append(
        self.starttag(node, 'p', prefix, CLASS='attribution'))

def depart_attribution(self, node):
    self.body.append(self.context.pop() + '</p>\n')
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
......@@ -483,7 +527,7 @@ class HTMLTranslator(nodes.NodeVisitor):
if len(node):
if isinstance(node[0], nodes.Element):
node[0].set_class('first')
if isinstance(node[0], nodes.Element):
if isinstance(node[-1], nodes.Element):
node[-1].set_class('last')
def depart_docinfo_item(self):
......@@ -605,7 +649,10 @@ class HTMLTranslator(nodes.NodeVisitor):
self.body.append(self.context.pop())
def visit_figure(self, node):
self.body.append(self.starttag(node, 'div', CLASS='figure'))
atts = {'class': 'figure'}
if node.get('width'):
atts['style'] = 'width: %spx' % node['width']
self.body.append(self.starttag(node, 'div', **atts))
def depart_figure(self, node):
self.body.append('</div>\n')
......@@ -699,6 +746,8 @@ class HTMLTranslator(nodes.NodeVisitor):
def visit_image(self, node):
atts = node.attributes.copy()
if atts.has_key('class'):
del atts['class'] # prevent duplication with node attrs
atts['src'] = atts['uri']
del atts['uri']
if not atts.has_key('alt'):
......@@ -719,6 +768,12 @@ class HTMLTranslator(nodes.NodeVisitor):
def depart_important(self, node):
self.depart_admonition()
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span', ''))
def depart_inline(self, node):
self.body.append('</span>')
def visit_label(self, node):
self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
CLASS='label'))
......@@ -900,6 +955,12 @@ class HTMLTranslator(nodes.NodeVisitor):
def depart_row(self, node):
self.body.append('</tr>\n')
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='rubric'))
def depart_rubric(self, node):
self.body.append('</p>\n')
def visit_section(self, node):
self.section_level += 1
self.body.append(self.starttag(node, 'div', CLASS='section'))
......@@ -908,6 +969,14 @@ class HTMLTranslator(nodes.NodeVisitor):
self.section_level -= 1
self.body.append('</div>\n')
def visit_sidebar(self, node):
self.body.append(self.starttag(node, 'div', CLASS='sidebar'))
self.in_sidebar = 1
def depart_sidebar(self, node):
self.body.append('</div>\n')
self.in_sidebar = None
def visit_status(self, node):
self.visit_docinfo_item(node, 'status', meta=None)
......@@ -920,6 +989,12 @@ class HTMLTranslator(nodes.NodeVisitor):
def depart_strong(self, node):
self.body.append('</strong>')
def visit_subscript(self, node):
self.body.append(self.starttag(node, 'sub', ''))
def depart_subscript(self, node):
self.body.append('</sub>')
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
......@@ -928,10 +1003,22 @@ class HTMLTranslator(nodes.NodeVisitor):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
else:
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
def depart_subtitle(self, node):
self.body.append('</h2>\n')
self.body.append(self.context.pop())
def visit_superscript(self, node):
self.body.append(self.starttag(node, 'sup', ''))
def depart_superscript(self, node):
self.body.append('</sup>')
def visit_system_message(self, node):
if node['level'] < self.document.reporter['writer'].report_level:
......@@ -967,7 +1054,7 @@ class HTMLTranslator(nodes.NodeVisitor):
a_start = a_end = ''
self.body.append('System Message: %s%s/%s%s (<tt>%s</tt>%s)%s</p>\n'
% (a_start, node['type'], node['level'], a_end,
node['source'], line, backref_text))
self.encode(node['source']), line, backref_text))
def depart_system_message(self, node):
self.body.append('</div>\n')
......@@ -1036,15 +1123,19 @@ class HTMLTranslator(nodes.NodeVisitor):
def visit_title(self, node):
"""Only 6 section levels are supported by HTML."""
check_id = 0
if isinstance(node.parent, nodes.topic):
self.body.append(
self.starttag(node, 'p', '', CLASS='topic-title'))
if node.parent.hasattr('id'):
self.body.append(
self.starttag({}, 'a', '', name=node.parent['id']))
self.context.append('</a></p>\n')
else:
self.context.append('</p>\n')
check_id = 1
elif isinstance(node.parent, nodes.sidebar):
self.body.append(
self.starttag(node, 'p', '', CLASS='sidebar-title'))
check_id = 1
elif isinstance(node.parent, nodes.admonition):
self.body.append(
self.starttag(node, 'p', '', CLASS='admonition-title'))
check_id = 1
elif self.section_level == 0:
# document title
self.head.append('<title>%s</title>\n'
......@@ -1062,6 +1153,13 @@ class HTMLTranslator(nodes.NodeVisitor):
atts['href'] = '#' + node['refid']
self.body.append(self.starttag({}, 'a', '', **atts))
self.context.append('</a></h%s>\n' % (self.section_level))
if check_id:
if node.parent.hasattr('id'):
self.body.append(
self.starttag({}, 'a', '', name=node.parent['id']))
self.context.append('</a></p>\n')
else:
self.context.append('</p>\n')
def depart_title(self, node):
self.body.append(self.context.pop())
......
# Author: Andreas Jung
# Contact: andreas@andreas-jung.com
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:20 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
......
"""
:Author: Engelbert Gruber
:Contact: grubert@users.sourceforge.net
:Revision: $Revision: 1.1 $
:Date: $Date: 2003/07/10 15:50:05 $
:Copyright: This module has been placed in the public domain.
LaTeX2e document tree Writer.
"""
__docformat__ = 'reStructuredText'
# Code contributions from several people are included; thanks to all.
# Some contributors named: David Abrahams, Julien Letessier; others unnamed.
#
# convention deactivate code by two # e.g. ##.
import sys
import time
import re
import string
from types import ListType
from docutils import writers, nodes, languages
class Writer(writers.Writer):
    """LaTeX2e document tree writer front end."""

    # Output format names this writer answers to.
    supported = ('latex','latex2e')
    """Formats this writer supports."""

    # Option-parser specification: (group title, group description,
    # option tuples), consumed by the docutils front end.
    settings_spec = (
        'LaTeX-Specific Options',
        'The LaTeX "--output-encoding" default is "latin-1:strict".',
        (('Specify documentclass. Default is "article".',
          ['--documentclass'],
          {'default': 'article', }),
         ('Format for footnote references: one of "superscript" or '
          '"brackets". Default is "brackets".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'brackets',
           'metavar': '<format>'}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none". Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Specify a stylesheet file. The file will be "input" by latex '
          'in the document header. Default is "style.tex". '
          'If this is set to "" disables input.'
          'Overridden by --stylesheet-path.',
          ['--stylesheet'],
          {'default': 'style.tex', 'metavar': '<file>'}),
         ('Specify a stylesheet file, relative to the current working '
          'directory.'
          'Overrides --stylesheet.',
          ['--stylesheet-path'],
          {'metavar': '<file>'}),
         ('Link to the stylesheet in the output LaTeX file. This is the '
          'default.',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Embed the stylesheet in the output LaTeX file. The stylesheet '
          'file must be accessible during processing (--stylesheet-path is '
          'recommended).',
          ['--embed-stylesheet'],
          {'action': 'store_true'}),
         ('Table of contents by docutils (default) or latex. Latex(writer) '
          'supports only one ToC per document, but docutils does not write '
          'pagenumbers.',
          ['--use-latex-toc'], {'default': 0}),
         ('Color of any hyperlinks embedded in text '
          '(default: "blue", "0" to disable).',
          ['--hyperlink-color'], {'default': 'blue'}),))

    # LaTeX output defaults to latin-1 (see group description above).
    settings_defaults = {'output_encoding': 'latin-1'}

    # Filled in by translate().
    output = None
    """Final translated form of `document`."""

    def translate(self):
        """
        Walk the document tree with a LaTeXTranslator and store the
        rendered result (and its component parts) on this writer.
        """
        visitor = LaTeXTranslator(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
        # Expose the individual document parts for callers that want
        # finer-grained access than the full `output` string.
        self.head_prefix = visitor.head_prefix
        self.head = visitor.head
        self.body_prefix = visitor.body_prefix
        self.body = visitor.body
        self.body_suffix = visitor.body_suffix
"""
Notes on LaTeX
--------------
* latex does not support multiple tocs in one document.
(might be no limitation except for docutils documentation)
* width
* linewidth - width of a line in the local environment
* textwidth - the width of text on the page
Maybe always use linewidth ?
"""
class Babel:
    """Language specifics for LaTeX (babel package language names)."""

    # Mapping of ISO 639 language codes to babel language names.
    # Country codes by a.schlock; partly converted by hand from ISO and
    # babel documentation.
    # NOTE: the original table listed 'hu' and 'pt' twice; in a dict
    # literal the last binding wins, so only the effective entries are
    # kept here ('hu': 'hungarian', 'pt': 'portuguese').
    _ISO639_TO_BABEL = {
        'no': 'norsk',     # XXX added by hand (forget about nynorsk?)
        'gd': 'scottish',  # XXX added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',   # XXX rather than german
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian',
        }

    def __init__(self, lang):
        """
        Initialize for language code `lang` (e.g. 'de', 'en_GB').
        """
        self.language = lang
        # pdflatex does not produce double quotes for ngerman in tt, so
        # a replacement for '"' in teletype text is needed for German.
        # (attribute name keeps its historical spelling for callers)
        self.double_quote_replacment = None
        if self.language.startswith('de'):
            # maybe use: {\glqq} {\grqq}.
            self.quotes = ("\"`", "\"'")
            self.double_quote_replacment = "{\\dq}"
        else:
            self.quotes = ("``", "''")
        self.quote_index = 0

    def next_quote(self):
        """Return the next quote string, alternating opening/closing."""
        q = self.quotes[self.quote_index]
        self.quote_index = (self.quote_index + 1) % 2
        return q

    def quote_quotes(self, text):
        """Replace each '"' in `text` with alternating typographic quotes."""
        t = None
        for part in text.split('"'):
            if t is None:
                t = part
            else:
                t += self.next_quote() + part
        return t

    def double_quotes_in_tt(self, text):
        """Replace '"' in teletype text, if a replacement is defined."""
        if not self.double_quote_replacment:
            return text
        return text.replace('"', self.double_quote_replacment)

    def get_language(self):
        """
        Return the babel name for `self.language`, falling back to the
        bare language code for dialects (e.g. 'en_GB' -> 'english').
        Return `None` if the language is unknown.
        """
        if self.language in self._ISO639_TO_BABEL:
            return self._ISO639_TO_BABEL[self.language]
        # support dialects.
        base = self.language.split("_")[0]
        if base in self._ISO639_TO_BABEL:
            return self._ISO639_TO_BABEL[base]
        return None
# Reusable LaTeX preamble fragments, inserted into the document head by
# the translator.  Each value is a list of strings so a fragment can be
# extended onto the head prefix directly.
latex_headings = {
    # A list-based environment for rendering reStructuredText option
    # lists; the environment argument sets the label width.
    'optionlist_environment' : [
        '\\newcommand{\\optionlistlabel}[1]{\\bf #1 \\hfill}\n'
        '\\newenvironment{optionlist}[1]\n'
        '{\\begin{list}{}\n'
        ' {\\setlength{\\labelwidth}{#1}\n'
        ' \\setlength{\\rightmargin}{1cm}\n'
        ' \\setlength{\\leftmargin}{\\rightmargin}\n'
        ' \\addtolength{\\leftmargin}{\\labelwidth}\n'
        ' \\addtolength{\\leftmargin}{\\labelsep}\n'
        ' \\renewcommand{\\makelabel}{\\optionlistlabel}}\n'
        '}{\\end{list}}\n',
        ],
    # Float-placement tweaks so footnote floats behave sensibly.
    'footnote_floats' : [
        '% begin: floats for footnotes tweaking.\n',
        '\\setlength{\\floatsep}{0.5em}\n',
        '\\setlength{\\textfloatsep}{\\fill}\n',
        '\\addtolength{\\textfloatsep}{3em}\n',
        '\\renewcommand{\\textfraction}{0.5}\n',
        '\\renewcommand{\\topfraction}{0.5}\n',
        '\\renewcommand{\\bottomfraction}{0.5}\n',
        '\\setcounter{totalnumber}{50}\n',
        '\\setcounter{topnumber}{50}\n',
        '\\setcounter{bottomnumber}{50}\n',
        '% end floats for footnotes\n',
        ],
    # Commands intended to be overridable from the user's style file.
    'some_commands' : [
        '% some commands, that could be overwritten in the style file.\n'
        '\\newcommand{\\rubric}[1]'
        '{\\subsection*{~\\hfill {\\it #1} \\hfill ~}}\n'
        '% end of "some commands"\n',
        ]
    }
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
d_options = '10pt' # papersize, fontsize
d_paper = 'a4paper'
d_margins = '2cm'
latex_head = '\\documentclass[%s]{%s}\n'
encoding = '\\usepackage[%s]{inputenc}\n'
linking = '\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n'
geometry = '\\usepackage[%s,margin=%s,nohead]{geometry}\n'
stylesheet = '\\input{%s}\n'
# add a generated on day , machine by user using docutils version.
generator = '%% generator Docutils: http://docutils.sourceforge.net/\n'
# use latex tableofcontents or let docutils do it.
use_latex_toc = 0
# table kind: if 0 tabularx (single page), 1 longtable
# maybe should be decided on row count.
use_longtable = 1
# TODO: use mixins for different implementations.
# list environment for option-list. else tabularx
use_optionlist_for_option_list = 1
# list environment for docinfo. else tabularx
use_optionlist_for_docinfo = 0 # NOT YET IN USE
# default link color
hyperlink_color = "blue"
    def __init__(self, document):
        """Initialize translator state and assemble the LaTeX preamble."""
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        self.use_latex_toc = settings.use_latex_toc
        self.hyperlink_color = settings.hyperlink_color
        # '0' disables link coloring: links are black, colorlinks is off.
        if self.hyperlink_color == '0':
            self.hyperlink_color = 'black'
            self.colorlinks = 'false'
        else:
            self.colorlinks = 'true'
        # language: labels, bibliographic_fields, and author_separators.
        # to allow writing labels for specific languages.
        self.language = languages.get_language(settings.language_code)
        self.babel = Babel(settings.language_code)
        self.author_separator = self.language.author_separators[0]
        if self.babel.get_language():
            self.d_options += ',%s' % \
                    self.babel.get_language()
        self.head_prefix = [
              self.latex_head % (self.d_options,self.settings.documentclass),
              '\\usepackage{babel}\n',     # language is in documents settings.
              '\\usepackage{shortvrb}\n',  # allows verb in footnotes.
              self.encoding % self.to_latex_encoding(settings.output_encoding),
              # * tabularx: for docinfo, automatic width of columns, always on one page.
              '\\usepackage{tabularx}\n',
              '\\usepackage{longtable}\n',
              # possible other packages.
              # * fancyhdr
              # * ltxtable is a combination of tabularx and longtable (pagebreaks).
              #   but ??
              #
              # extra space between text in tables and the line above them
              '\\setlength{\\extrarowheight}{2pt}\n',
              '\\usepackage{amsmath}\n',   # what fore amsmath.
              '\\usepackage{graphicx}\n',
              '\\usepackage{color}\n',
              '\\usepackage{multirow}\n',
              self.linking % (self.colorlinks, self.hyperlink_color, self.hyperlink_color),
              # geometry and fonts might go into style.tex.
              self.geometry % (self.d_paper, self.d_margins),
              #
              self.generator,
              # latex lengths
              '\\newlength{\\admonitionwidth}\n',
              # NOTE(review): the next item lacks a trailing comma, so it is
              # implicitly concatenated with the string after the comment;
              # harmless because the list is eventually joined into one string.
              '\\setlength{\\admonitionwidth}{0.9\\textwidth}\n'
              # width for docinfo tablewidth
              '\\newlength{\\docinfowidth}\n',
              '\\setlength{\\docinfowidth}{0.9\\textwidth}\n'
              ]
        self.head_prefix.extend( latex_headings['optionlist_environment'] )
        self.head_prefix.extend( latex_headings['footnote_floats'] )
        self.head_prefix.extend( latex_headings['some_commands'] )
        ## stylesheet is last: so it might be possible to overwrite defaults.
        stylesheet = self.get_stylesheet_reference()
        if stylesheet:
            self.head_prefix.append(self.stylesheet % (stylesheet))
        if self.linking: # and maybe check for pdf
            self.pdfinfo = [ ]
            self.pdfauthor = None
            # pdftitle, pdfsubject, pdfauthor, pdfkeywords, pdfcreator, pdfproducer
        else:
            self.pdfinfo = None
        # NOTE: Latex wants a date and an author, rst puts this into
        # docinfo, so normally we do not want latex author/date handling.
        # latex article has its own handling of date and author, deactivate.
        self.latex_docinfo = 0
        self.head = [ ]
        if not self.latex_docinfo:
            self.head.extend( [ '\\author{}\n', '\\date{}\n' ] )
        self.body_prefix = ['\\raggedbottom\n']
        # separate title, so we can append a subtitle.
        self.title = ""
        self.body = []
        self.body_suffix = ['\n']
        self.section_level = 0
        # generic stack used by visit/depart pairs to hand state across.
        self.context = []
        self.topic_class = ''
        # column specification for tables
        self.colspecs = []
        # Flags to encode
        # ---------------
        # verbatim: to tell encode not to encode.
        self.verbatim = 0
        # insert_none_breaking_blanks: to tell encode to replace blanks by "~".
        self.insert_none_breaking_blanks = 0
        # insert_newline: to tell encode to add latex newline.
        self.insert_newline = 0
        # mbox_newline: to tell encode to add mbox and newline.
        self.mbox_newline = 0
        # enumeration is done by list environment.
        self._enum_cnt = 0
        # docinfo: collects docinfo fields while not None.
        self.docinfo = None
        # inside literal block: no quote mangling.
        self.literal_block = 0
        self.literal = 0
def get_stylesheet_reference(self):
if self.settings.stylesheet_path:
return self.settings.stylesheet_path
else:
return self.settings.stylesheet
def to_latex_encoding(self,docutils_encoding):
"""
Translate docutils encoding name into latex's.
Default fallback method is remove "-" and "_" chars from docutils_encoding.
"""
tr = { "iso-8859-1": "latin1", # west european
"iso-8859-2": "latin2", # east european
"iso-8859-3": "latin3", # esperanto, maltese
"iso-8859-4": "latin4", # north european,scandinavian, baltic
"iso-8859-5": "iso88595", # cyrillic (ISO)
"iso-8859-9": "latin5", # turkish
"iso-8859-15": "latin9", # latin9, update to latin1.
"mac_cyrillic": "maccyr", # cyrillic (on Mac)
"windows-1251": "cp1251", # cyrillic (on Windows)
"koi8-r": "koi8-r", # cyrillic (Russian)
"koi8-u": "koi8-u", # cyrillic (Ukrainian)
"windows-1250": "cp1250", #
"windows-1252": "cp1252", #
"us-ascii": "ascii", # ASCII (US)
# unmatched encodings
#"": "applemac",
#"": "ansinew", # windows 3.1 ansi
#"": "ascii", # ASCII encoding for the range 32--127.
#"": "cp437", # dos latine us
#"": "cp850", # dos latin 1
#"": "cp852", # dos latin 2
#"": "decmulti",
#"": "latin10",
#"iso-8859-6": "" # arabic
#"iso-8859-7": "" # greek
#"iso-8859-8": "" # hebrew
#"iso-8859-10": "" # latin6, more complete iso-8859-4
}
if tr.has_key(docutils_encoding.lower()):
return tr[docutils_encoding.lower()]
return docutils_encoding.translate(string.maketrans("",""),"_-").lower()
def language_label(self, docutil_label):
return self.language.labels[docutil_label]
    def encode(self, text):
        """
        Encode special characters in `text` & return.
            # $ % & ~ _ ^ \ { }
        Escaping with a backslash does not help with backslashes, ~ and ^.
        < > are only available in math-mode (really ?)
        $ starts math- mode.
        AND quotes:

        NOTE: the replacement ORDER below is significant -- braces first,
        then backslash, then characters whose replacements contain braces
        or backslashes.
        """
        if self.verbatim:
            return text
        # compile the regexps once. do it here so one can see them.
        # (cached on the instance after the first call.)
        # first the braces.
        if not self.__dict__.has_key('encode_re_braces'):
            self.encode_re_braces = re.compile(r'([{}])')
        text = self.encode_re_braces.sub(r'{\\\1}',text)
        if not self.__dict__.has_key('encode_re_bslash'):
            # find backslash: except in the form '{\{}' or '{\}}'.
            self.encode_re_bslash = re.compile(r'(?<!{)(\\)(?![{}]})')
        # then the backslash: except in the form from line above:
        # either '{\{}' or '{\}}'.
        text = self.encode_re_bslash.sub(r'{\\textbackslash}', text)
        # then dollar
        text = text.replace("$", '{\\$}')
        # then all that needs math mode
        text = text.replace("<", '{$<$}')
        text = text.replace(">", '{$>$}')
        # then
        text = text.replace("&", '{\\&}')
        text = text.replace("_", '{\\_}')
        # the ^:
        # * verb|^| does not work in mbox.
        # * mathmode has wedge. hat{~} would also work.
        text = text.replace("^", '{\\ensuremath{^\\wedge}}')
        text = text.replace("%", '{\\%}')
        text = text.replace("#", '{\\#}')
        text = text.replace("~", '{\\~{ }}')
        if self.literal_block or self.literal:
            # pdflatex does not produce doublequotes for ngerman.
            text = self.babel.double_quotes_in_tt(text)
        else:
            text = self.babel.quote_quotes(text)
        if self.insert_newline:
            # HACK: insert a blank before the newline, to avoid
            # ! LaTeX Error: There's no line here to end.
            text = text.replace("\n", '~\\\\\n')
        elif self.mbox_newline:
            text = text.replace("\n", '}\\\\\n\\mbox{')
        if self.insert_none_breaking_blanks:
            text = text.replace(' ', '~')
        # unicode: only the dagger is special-cased so far.
        text = text.replace(u'\u2020', '{$\\dagger$}')
        return text
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
def astext(self):
if self.pdfinfo:
if self.pdfauthor:
self.pdfinfo.append('pdfauthor={%s}' % self.pdfauthor)
pdfinfo = '\\hypersetup{\n' + ',\n'.join(self.pdfinfo) + '\n}\n'
else:
pdfinfo = ''
title = '\\title{%s}\n' % self.title
return ''.join(self.head_prefix + [title]
+ self.head + [pdfinfo]
+ self.body_prefix + self.body + self.body_suffix)
    def visit_Text(self, node):
        # all document text passes through encode() for LaTeX escaping.
        self.body.append(self.encode(node.astext()))
    def depart_Text(self, node):
        pass
    def visit_address(self, node):
        # docinfo field; handled by the generic docinfo machinery.
        self.visit_docinfo_item(node, 'address')
    def depart_address(self, node):
        self.depart_docinfo_item(node)
def visit_admonition(self, node, name):
self.body.append('\\begin{center}\\begin{sffamily}\n')
self.body.append('\\fbox{\\parbox{\\admonitionwidth}{\n')
self.body.append('\\textbf{\\large '+ self.language.labels[name] + '}\n');
self.body.append('\\vspace{2mm}\n')
def depart_admonition(self):
self.body.append('}}\n') # end parbox fbox
self.body.append('\\end{sffamily}\n\\end{center}\n');
    def visit_attention(self, node):
        self.visit_admonition(node, 'attention')
    def depart_attention(self, node):
        self.depart_admonition()
    def visit_author(self, node):
        self.visit_docinfo_item(node, 'author')
    def depart_author(self, node):
        self.depart_docinfo_item(node)
    def visit_authors(self, node):
        # ignore. visit_author is called for each one
        # self.visit_docinfo_item(node, 'author')
        pass
    def depart_authors(self, node):
        # self.depart_docinfo_item(node)
        pass
    def visit_block_quote(self, node):
        self.body.append( '\\begin{quote}\n')
    def depart_block_quote(self, node):
        self.body.append( '\\end{quote}\n')
def visit_bullet_list(self, node):
if not self.use_latex_toc and self.topic_class == 'contents':
self.body.append( '\\begin{list}{}{}\n' )
else:
self.body.append( '\\begin{itemize}\n' )
def depart_bullet_list(self, node):
if not self.use_latex_toc and self.topic_class == 'contents':
self.body.append( '\\end{list}\n' )
else:
self.body.append( '\\end{itemize}\n' )
    def visit_caption(self, node):
        self.body.append( '\\caption{' )
    def depart_caption(self, node):
        self.body.append('}')
    def visit_caution(self, node):
        self.visit_admonition(node, 'caution')
    def depart_caution(self, node):
        self.depart_admonition()
    def visit_citation(self, node):
        # citations reuse the footnote rendering (a bottom float).
        self.visit_footnote(node)
    def depart_citation(self, node):
        self.depart_footnote(node)
    def visit_title_reference(self, node):
        # BUG title-references are what?
        pass
    def depart_title_reference(self, node):
        pass
    def visit_citation_reference(self, node):
        # rendered as a bracketed hyperlink to the citation's target.
        href = ''
        if node.has_key('refid'):
            href = node['refid']
        elif node.has_key('refname'):
            href = self.document.nameids[node['refname']]
        self.body.append('[\\hyperlink{%s}{' % href)
    def depart_citation_reference(self, node):
        self.body.append('}]')
    def visit_classifier(self, node):
        self.body.append( '(\\textbf{' )
    def depart_classifier(self, node):
        self.body.append( '})\n' )
    def visit_colspec(self, node):
        if self.use_longtable:
            # collect specs; widths are computed later in get_colspecs().
            self.colspecs.append(node)
        else:
            # tabularx: just count columns on the context stack.
            self.context[-1] += 1
    def depart_colspec(self, node):
        pass
    def visit_comment(self, node,
                      sub=re.compile('\n').sub):
        """Escape each end of line by a new comment start in comment text."""
        self.body.append('%% %s \n' % sub('\n% ', node.astext()))
        raise nodes.SkipNode
    def visit_contact(self, node):
        self.visit_docinfo_item(node, 'contact')
    def depart_contact(self, node):
        self.depart_docinfo_item(node)
    def visit_copyright(self, node):
        self.visit_docinfo_item(node, 'copyright')
    def depart_copyright(self, node):
        self.depart_docinfo_item(node)
    def visit_danger(self, node):
        self.visit_admonition(node, 'danger')
    def depart_danger(self, node):
        self.depart_admonition()
    def visit_date(self, node):
        self.visit_docinfo_item(node, 'date')
    def depart_date(self, node):
        self.depart_docinfo_item(node)
    def visit_decoration(self, node):
        pass
    def depart_decoration(self, node):
        pass
    def visit_definition(self, node):
        self.body.append('%[visit_definition]\n')
    def depart_definition(self, node):
        self.body.append('\n')
        self.body.append('%[depart_definition]\n')
    def visit_definition_list(self, node):
        self.body.append( '\\begin{description}\n' )
    def depart_definition_list(self, node):
        self.body.append( '\\end{description}\n' )
    def visit_definition_list_item(self, node):
        self.body.append('%[visit_definition_list_item]\n')
    def depart_definition_list_item(self, node):
        self.body.append('%[depart_definition_list_item]\n')
    def visit_description(self, node):
        # option lists: optionlist env needs a space, tabularx a column sep.
        if self.use_optionlist_for_option_list:
            self.body.append( ' ' )
        else:
            self.body.append( ' & ' )
    def depart_description(self, node):
        pass
    def visit_docinfo(self, node):
        # docinfo fields are collected into their own list (self.docinfo)
        # and prepended to the body in depart_docinfo.
        self.docinfo = []
        self.docinfo.append('%' + '_'*75 + '\n')
        self.docinfo.append('\\begin{center}\n')
        self.docinfo.append('\\begin{tabularx}{\\docinfowidth}{lX}\n')
    def depart_docinfo(self, node):
        self.docinfo.append('\\end{tabularx}\n')
        self.docinfo.append('\\end{center}\n')
        self.body = self.docinfo + self.body
        # clear docinfo, so field names are no longer appended.
        self.docinfo = None
        if self.use_latex_toc:
            self.body.append('\\tableofcontents\n\n\\bigskip\n')
def visit_docinfo_item(self, node, name):
if not self.latex_docinfo:
self.docinfo.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'author':
if not self.pdfinfo == None:
if not self.pdfauthor:
self.pdfauthor = self.attval(node.astext())
else:
self.pdfauthor += self.author_separator + self.attval(node.astext())
if self.latex_docinfo:
self.head.append('\\author{%s}\n' % self.attval(node.astext()))
raise nodes.SkipNode
elif name == 'date':
if self.latex_docinfo:
self.head.append('\\date{%s}\n' % self.attval(node.astext()))
raise nodes.SkipNode
if name == 'address':
# BUG will fail if latex_docinfo is set.
self.insert_newline = 1
self.docinfo.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
self.context.append(self.docinfo)
self.context.append(len(self.body))
def depart_docinfo_item(self, node):
size = self.context.pop()
dest = self.context.pop()
tail = self.context.pop()
tail = self.body[size:] + [tail]
del self.body[size:]
dest.extend(tail)
# for address we did set insert_newline
self.insert_newline = 0
    def visit_doctest_block(self, node):
        # verbatim flag makes encode() pass text through unchanged.
        self.body.append( '\\begin{verbatim}' )
        self.verbatim = 1
    def depart_doctest_block(self, node):
        self.body.append( '\\end{verbatim}\n' )
        self.verbatim = 0
    def visit_document(self, node):
        self.body_prefix.append('\\begin{document}\n')
        self.body_prefix.append('\\maketitle\n\n')
        # alternative use titlepage environment.
        # \begin{titlepage}
    def depart_document(self, node):
        self.body_suffix.append('\\end{document}\n')
    def visit_emphasis(self, node):
        self.body.append('\\emph{')
    def depart_emphasis(self, node):
        self.body.append('}')
def visit_entry(self, node):
# cell separation
column_one = 1
if self.context[-1] > 0:
column_one = 0
if not column_one:
self.body.append(' & ')
# multi{row,column}
if node.has_key('morerows') and node.has_key('morecols'):
raise NotImplementedError('LaTeX can\'t handle cells that'
'span multiple rows *and* columns, sorry.')
atts = {}
if node.has_key('morerows'):
count = node['morerows'] + 1
self.body.append('\\multirow{%d}*{' % count)
self.context.append('}')
elif node.has_key('morecols'):
# the vertical bar before column is missing if it is the first column.
# the one after always.
if column_one:
bar = '|'
else:
bar = ''
count = node['morecols'] + 1
self.body.append('\\multicolumn{%d}{%sl|}{' % (count, bar))
self.context.append('}')
else:
self.context.append('')
# header / not header
if isinstance(node.parent.parent, nodes.thead):
self.body.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
def depart_entry(self, node):
self.body.append(self.context.pop()) # header / not header
self.body.append(self.context.pop()) # multirow/column
self.context[-1] += 1
    def visit_enumerated_list(self, node):
        # We create our own enumeration list environment.
        # This allows to set the style and starting value
        # and unlimited nesting.
        self._enum_cnt += 1
        # map docutils enumeration types to latex counter formats.
        enum_style = {'arabic':'arabic',
                'loweralpha':'alph',
                'upperalpha':'Alph',
                'lowerroman':'roman',
                'upperroman':'Roman' }
        enum_suffix = ""
        if node.has_key('suffix'):
            enum_suffix = node['suffix']
        enum_prefix = ""
        if node.has_key('prefix'):
            enum_prefix = node['prefix']
        enum_type = "arabic"
        if node.has_key('enumtype'):
            enum_type = node['enumtype']
        if enum_style.has_key(enum_type):
            enum_type = enum_style[enum_type]
        # a fresh counter per list keeps nesting unlimited.
        counter_name = "listcnt%d" % self._enum_cnt;
        self.body.append('\\newcounter{%s}\n' % counter_name)
        self.body.append('\\begin{list}{%s\\%s{%s}%s}\n' % \
            (enum_prefix,enum_type,counter_name,enum_suffix))
        self.body.append('{\n')
        self.body.append('\\usecounter{%s}\n' % counter_name)
        # set start after usecounter, because it initializes to zero.
        if node.has_key('start'):
            self.body.append('\\addtocounter{%s}{%d}\n' \
                    % (counter_name,node['start']-1))
        ## set rightmargin equal to leftmargin
        self.body.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
        self.body.append('}\n')
    def depart_enumerated_list(self, node):
        self.body.append('\\end{list}\n')
    def visit_error(self, node):
        self.visit_admonition(node, 'error')
    def depart_error(self, node):
        self.depart_admonition()
    def visit_field(self, node):
        # real output is done in siblings: _argument, _body, _name
        pass
    def depart_field(self, node):
        self.body.append('\n')
        ##self.body.append('%[depart_field]\n')
    def visit_field_argument(self, node):
        self.body.append('%[visit_field_argument]\n')
    def depart_field_argument(self, node):
        self.body.append('%[depart_field_argument]\n')
    def visit_field_body(self, node):
        # BUG by attach as text we loose references.
        if self.docinfo:
            self.docinfo.append('%s \\\\\n' % node.astext())
            raise nodes.SkipNode
        # BUG: what happens if not docinfo
    def depart_field_body(self, node):
        self.body.append( '\n' )
    def visit_field_list(self, node):
        # outside docinfo, field lists render as a quoted description list.
        if not self.docinfo:
            self.body.append('\\begin{quote}\n')
            self.body.append('\\begin{description}\n')
    def depart_field_list(self, node):
        if not self.docinfo:
            self.body.append('\\end{description}\n')
            self.body.append('\\end{quote}\n')
    def visit_field_name(self, node):
        # BUG this duplicates docinfo_item
        if self.docinfo:
            self.docinfo.append('\\textbf{%s}: &\n\t' % node.astext())
            raise nodes.SkipNode
        else:
            self.body.append('\\item [')
    def depart_field_name(self, node):
        if not self.docinfo:
            self.body.append(':]')
def visit_figure(self, node):
self.body.append( '\\begin{figure}\n' )
def depart_figure(self, node):
self.body.append( '\\end{figure}\n' )
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = (['\n\\begin{center}\small\n']
+ self.body[start:] + ['\n\\end{center}\n'])
self.body_suffix[:0] = footer
del self.body[start:]
    def visit_footnote(self, node):
        # footnotes are rendered as bottom floats with a hypertarget anchor.
        notename = node['id']
        self.body.append('\\begin{figure}[b]')
        self.body.append('\\hypertarget{%s}' % notename)
    def depart_footnote(self, node):
        self.body.append('\\end{figure}\n')
    def visit_footnote_reference(self, node):
        href = ''
        if node.has_key('refid'):
            href = node['refid']
        elif node.has_key('refname'):
            href = self.document.nameids[node['refname']]
        format = self.settings.footnote_references
        if format == 'brackets':
            suffix = '['
            self.context.append(']')
        elif format == 'superscript':
            suffix = '\\raisebox{.5em}[0em]{\\scriptsize'
            self.context.append('}')
        else: # shouldn't happen
            raise AssertionError('Illegal footnote reference format.')
        self.body.append('%s\\hyperlink{%s}{' % (suffix,href))
    def depart_footnote_reference(self, node):
        self.body.append('}%s' % self.context.pop())
    def visit_generated(self, node):
        pass
    def depart_generated(self, node):
        pass
    def visit_header(self, node):
        # remember where the header's rendering starts in self.body.
        self.context.append(len(self.body))
    def depart_header(self, node):
        # move the rendered header into the document prefix.
        start = self.context.pop()
        self.body_prefix.append('\n\\verb|begin_header|\n')
        self.body_prefix.extend(self.body[start:])
        self.body_prefix.append('\n\\verb|end_header|\n')
        del self.body[start:]
    def visit_hint(self, node):
        self.visit_admonition(node, 'hint')
    def depart_hint(self, node):
        self.depart_admonition()
    def visit_image(self, node):
        # only the 'uri' attribute is honoured; size/align are ignored here.
        atts = node.attributes.copy()
        href = atts['uri']
        ##self.body.append('\\begin{center}\n')
        self.body.append('\n\\includegraphics{%s}\n' % href)
        ##self.body.append('\\end{center}\n')
    def depart_image(self, node):
        pass
    def visit_important(self, node):
        self.visit_admonition(node, 'important')
    def depart_important(self, node):
        self.depart_admonition()
    def visit_interpreted(self, node):
        # @@@ Incomplete, pending a proper implementation on the
        # Parser/Reader end.
        self.visit_literal(node)
    def depart_interpreted(self, node):
        self.depart_literal(node)
    def visit_label(self, node):
        # footnote/citation label
        self.body.append('[')
    def depart_label(self, node):
        self.body.append(']')
    def visit_legend(self, node):
        self.body.append('{\\small ')
    def depart_legend(self, node):
        self.body.append('}')
    def visit_line_block(self, node):
        """line-block:
        * whitespace (including linebreaks) is significant
        * inline markup is supported.
        * serif typeface
        """
        self.body.append('\\begin{flushleft}\n')
        self.insert_none_breaking_blanks = 1
        # NOTE(review): this flag is hard-coded to 1, so the mbox branches
        # below and in depart_line_block are currently dead code.
        self.line_block_without_mbox = 1
        if self.line_block_without_mbox:
            self.insert_newline = 1
        else:
            self.mbox_newline = 1
            self.body.append('\\mbox{')
    def depart_line_block(self, node):
        if self.line_block_without_mbox:
            self.insert_newline = 0
        else:
            self.body.append('}')
            self.mbox_newline = 0
        self.insert_none_breaking_blanks = 0
        self.body.append('\n\\end{flushleft}\n')
def visit_list_item(self, node):
self.body.append('\\item ')
def depart_list_item(self, node):
self.body.append('\n')
def visit_literal(self, node):
self.literal = 1
self.body.append('\\texttt{')
def depart_literal(self, node):
self.body.append('}')
self.literal = 0
    def visit_literal_block(self, node):
        """
        .. parsed-literal::
        """
        # typically in a typewriter/monospaced typeface.
        # care must be taken with the text, because inline markup is recognized.
        #
        # possibilities:
        # * verbatim: is no possibility, as inline markup does not work.
        # * obey..: is from julien and never worked for me (grubert).
        # NOTE(review): hard-coded to "mbox", so the obeylines branches
        # here and in depart_literal_block are currently dead code.
        self.use_for_literal_block = "mbox"
        self.literal_block = 1
        if (self.use_for_literal_block == "mbox"):
            self.mbox_newline = 1
            self.insert_none_breaking_blanks = 1
            self.body.append('\\begin{ttfamily}\\begin{flushleft}\n\\mbox{')
        else:
            self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n')
    def depart_literal_block(self, node):
        if (self.use_for_literal_block == "mbox"):
            self.body.append('}\n\\end{flushleft}\\end{ttfamily}\n')
            self.insert_none_breaking_blanks = 0
            self.mbox_newline = 0
        else:
            self.body.append('}\n')
        self.literal_block = 0
    def visit_meta(self, node):
        self.body.append('[visit_meta]\n')
        # BUG maybe set keywords for pdf
        ##self.head.append(self.starttag(node, 'meta', **node.attributes))
    def depart_meta(self, node):
        self.body.append('[depart_meta]\n')
    def visit_note(self, node):
        self.visit_admonition(node, 'note')
    def depart_note(self, node):
        self.depart_admonition()
    def visit_option(self, node):
        if self.context[-1]:
            # this is not the first option
            self.body.append(', ')
    def depart_option(self, node):
        # flag that the first option is done.
        self.context[-1] += 1
    def visit_option_argument(self, node):
        """The delimiter between an option and its argument."""
        self.body.append(node.get('delimiter', ' '))
    def depart_option_argument(self, node):
        pass
    def visit_option_group(self, node):
        if self.use_optionlist_for_option_list:
            self.body.append('\\item [')
        else:
            atts = {}
            # tabularx: long option groups span both columns.
            if len(node.astext()) > 14:
                self.body.append('\\multicolumn{2}{l}{')
                self.context.append('} \\\\\n ')
            else:
                self.context.append('')
            self.body.append('\\texttt{')
        # flag for first option
        self.context.append(0)
    def depart_option_group(self, node):
        self.context.pop() # the flag
        if self.use_optionlist_for_option_list:
            self.body.append('] ')
        else:
            self.body.append('}')
            self.body.append(self.context.pop())
    def visit_option_list(self, node):
        self.body.append('% [option list]\n')
        if self.use_optionlist_for_option_list:
            self.body.append('\\begin{optionlist}{3cm}\n')
        else:
            self.body.append('\\begin{center}\n')
            # BUG: use admwidth or make it relative to textwidth ?
            self.body.append('\\begin{tabularx}{.9\\linewidth}{lX}\n')
    def depart_option_list(self, node):
        if self.use_optionlist_for_option_list:
            self.body.append('\\end{optionlist}\n')
        else:
            self.body.append('\\end{tabularx}\n')
            self.body.append('\\end{center}\n')
    def visit_option_list_item(self, node):
        pass
    def depart_option_list_item(self, node):
        if not self.use_optionlist_for_option_list:
            self.body.append('\\\\\n')
    def visit_option_string(self, node):
        ##self.body.append(self.starttag(node, 'span', '', CLASS='option'))
        pass
    def depart_option_string(self, node):
        ##self.body.append('</span>')
        pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
if not self.topic_class == 'contents':
self.body.append('\n')
def depart_paragraph(self, node):
if self.topic_class == 'contents':
self.body.append('\n')
else:
self.body.append('\n')
    def visit_problematic(self, node):
        # problematic nodes are rendered bold red.
        self.body.append('{\\color{red}\\bfseries{}')
    def depart_problematic(self, node):
        self.body.append('}')
    def visit_raw(self, node):
        # pass raw latex through unchanged; skip raw content of other formats.
        if node.has_key('format') and node['format'].lower() == 'latex':
            self.body.append(node.astext())
        raise nodes.SkipNode
def visit_reference(self, node):
# for pdflatex hyperrefs might be supported
if node.has_key('refuri'):
href = node['refuri']
elif node.has_key('refid'):
href = '#' + node['refid']
elif node.has_key('refname'):
href = '#' + self.document.nameids[node['refname']]
##self.body.append('[visit_reference]')
self.body.append('\\href{%s}{' % href)
def depart_reference(self, node):
self.body.append('}')
##self.body.append('[depart_reference]')
    def visit_revision(self, node):
        self.visit_docinfo_item(node, 'revision')
    def depart_revision(self, node):
        self.depart_docinfo_item(node)
    def visit_row(self, node):
        # push a cell counter for visit_entry's '&' separation logic.
        self.context.append(0)
    def depart_row(self, node):
        self.context.pop() # remove cell counter
        self.body.append(' \\\\ \\hline\n')
    def visit_section(self, node):
        self.section_level += 1
    def depart_section(self, node):
        self.section_level -= 1
    def visit_sidebar(self, node):
        # BUG: this is just a hack to make sidebars render something
        self.body.append('\\begin{center}\\begin{sffamily}\n')
        self.body.append('\\fbox{\\colorbox[gray]{0.80}{\\parbox{\\admonitionwidth}{\n')
    def depart_sidebar(self, node):
        self.body.append('}}}\n') # end parbox colorbox fbox
        self.body.append('\\end{sffamily}\n\\end{center}\n');
    # prefix/suffix pairs keyed by the 'attribution' setting.
    attribution_formats = {'dash': ('---', ''),
            'parentheses': ('(', ')'),
            'parens': ('(', ')'),
            'none': ('', '')}
    def visit_attribution(self, node):
        prefix, suffix = self.attribution_formats[self.settings.attribution]
        self.body.append('\n\\begin{flushright}\n')
        self.body.append(prefix)
        self.context.append(suffix)
    def depart_attribution(self, node):
        self.body.append(self.context.pop() + '\n')
        self.body.append('\\end{flushright}\n')
    def visit_status(self, node):
        self.visit_docinfo_item(node, 'status')
    def depart_status(self, node):
        self.depart_docinfo_item(node)
    def visit_strong(self, node):
        self.body.append('\\textbf{')
    def depart_strong(self, node):
        self.body.append('}')
    def visit_substitution_definition(self, node):
        raise nodes.SkipNode
    def visit_substitution_reference(self, node):
        self.unimplemented_visit(node)
    def visit_subtitle(self, node):
        if isinstance(node.parent, nodes.sidebar):
            self.body.append('~\\\\\n\\textbf{')
            self.context.append('}\n\\smallskip\n')
        else:
            # document subtitle: appended to the title; subtree is skipped.
            self.title = self.title + \
                    '\\\\\n\\large{%s}\n' % self.encode(node.astext())
            raise nodes.SkipNode
    def depart_subtitle(self, node):
        if isinstance(node.parent, nodes.sidebar):
            self.body.append(self.context.pop())
    def visit_system_message(self, node):
        # suppress messages below the writer's report threshold.
        if node['level'] < self.document.reporter['writer'].report_level:
            raise nodes.SkipNode
    def depart_system_message(self, node):
        self.body.append('\n')
def get_colspecs(self):
"""
Return column specification for longtable.
Assumes reST line length being 80 characters.
"""
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self.colspecs:
colwidth = float(node['colwidth']) / width
total_width += colwidth
# donot make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
latex_table_spec = ""
for node in self.colspecs:
colwidth = factor * float(node['colwidth']) / width
latex_table_spec += "|p{%.2f\\linewidth}" % colwidth
self.colspecs = []
return latex_table_spec+"|"
    def visit_table(self, node):
        if self.use_longtable:
            self.body.append('\n\\begin{longtable}[c]')
        else:
            self.body.append('\n\\begin{tabularx}{\\linewidth}')
            self.context.append('table_sentinel') # sentinel
            self.context.append(0) # column counter
    def depart_table(self, node):
        if self.use_longtable:
            self.body.append('\\end{longtable}\n')
        else:
            self.body.append('\\end{tabularx}\n')
            sentinel = self.context.pop()
            if sentinel != 'table_sentinel':
                print 'context:', self.context + [sentinel]
                raise AssertionError
    def table_preamble(self):
        # emit the column spec: computed widths for longtable,
        # counted 'X' columns for tabularx.
        if self.use_longtable:
            self.body.append('{%s}\n' % self.get_colspecs())
        else:
            if self.context[-1] != 'table_sentinel':
                self.body.append('{%s}' % ('|X' * self.context.pop() + '|'))
                self.body.append('\n\\hline')
def visit_target(self, node):
if not (node.has_key('refuri') or node.has_key('refid')
or node.has_key('refname')):
self.body.append('\\hypertarget{%s}{' % node['name'])
self.context.append('}')
else:
self.context.append('')
def depart_target(self, node):
self.body.append(self.context.pop())
    def visit_tbody(self, node):
        # BUG write preamble if not yet done (colspecs not [])
        # for tables without heads.
        if self.colspecs:
            self.visit_thead(None)
            self.depart_thead(None)
        self.body.append('%[visit_tbody]\n')
    def depart_tbody(self, node):
        self.body.append('%[depart_tbody]\n')
    def visit_term(self, node):
        self.body.append('\\item[')
    def depart_term(self, node):
        # definition list term.
        self.body.append(':]\n')
    def visit_tgroup(self, node):
        #self.body.append(self.starttag(node, 'colgroup'))
        #self.context.append('</colgroup>\n')
        pass
    def depart_tgroup(self, node):
        pass
def visit_thead(self, node):
# number_of_columns will be zero after get_colspecs.
# BUG ! push onto context for depart to pop it.
number_of_columns = len(self.colspecs)
self.table_preamble()
#BUG longtable needs firstpage and lastfooter too.
self.body.append('\\hline\n')
    def depart_thead(self, node):
        if self.use_longtable:
            # the table header written should be on every page
            # => \endhead
            self.body.append('\\endhead\n')
            # and the firsthead => \endfirsthead
            # BUG i want a "continued from previous page" on every not
            # firsthead, but then we need the header twice.
            #
            # there is a \endfoot and \endlastfoot too.
            # but we need the number of columns to
            # self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns)
            # self.body.append('\\hline\n\\endfoot\n')
            # self.body.append('\\hline\n')
            # self.body.append('\\endlastfoot\n')
    def visit_tip(self, node):
        self.visit_admonition(node, 'tip')
    def depart_tip(self, node):
        self.depart_admonition()
    def visit_title(self, node):
        """Only 3 section levels are supported by LaTeX article (AFAIR).

        Handles four cases: titles of (1) topics, (2) sidebars,
        (3) the document itself, and (4) ordinary sections.
        """
        if isinstance(node.parent, nodes.topic):
            # section titles before the table of contents.
            if node.parent.hasattr('id'):
                self.body.append('\\hypertarget{%s}{}' % node.parent['id'])
            # BUG: latex chokes on center environment with "perhaps a missing item".
            # so we use hfill.
            self.body.append('\\subsection*{~\\hfill ')
            # the closing brace for subsection.
            self.context.append('\\hfill ~}\n')
        elif isinstance(node.parent, nodes.sidebar):
            self.body.append('\\textbf{\\large ')
            self.context.append('}\n\\smallskip\n')
        elif self.section_level == 0:
            # document title
            self.title = self.encode(node.astext())
            if not self.pdfinfo == None:
                self.pdfinfo.append( 'pdftitle={%s}' % self.encode(node.astext()) )
            raise nodes.SkipNode
        else:
            self.body.append('\n\n')
            self.body.append('%' + '_' * 75)
            self.body.append('\n\n')
            if node.parent.hasattr('id'):
                self.body.append('\\hypertarget{%s}{}\n' % node.parent['id'])
            # section_level 0 is title and handled above.
            # BUG: latex has no deeper sections (actually paragraph is no section either).
            if self.use_latex_toc:
                section_star = ""
            else:
                section_star = "*"
            if (self.section_level<=3): # 1,2,3
                self.body.append('\\%ssection%s{' % ('sub'*(self.section_level-1),section_star))
            elif (self.section_level==4):
                #self.body.append('\\paragraph*{')
                self.body.append('\\subsubsection%s{' % (section_star))
            else:
                # deeper levels are clamped to subsubsection as well.
                #self.body.append('\\subparagraph*{')
                self.body.append('\\subsubsection%s{' % (section_star))
            # BUG: self.body.append( '\\label{%s}\n' % name)
            self.context.append('}\n')
    def depart_title(self, node):
        # close the brace pushed by visit_title.
        self.body.append(self.context.pop())
        if isinstance(node.parent, nodes.sidebar):
            return
        # BUG level depends on style.
        elif node.parent.hasattr('id') and not self.use_latex_toc:
            # pdflatex allows level 0 to 3
            # ToC would be the only on level 0 so i choose to decrement the rest.
            # "Table of contents" bookmark to see the ToC. To avoid this
            # we set all zeroes to one.
            l = self.section_level
            if l>0:
                l = l-1
            self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \
                (l,node.astext(),node.parent['id']))
def visit_topic(self, node):
self.topic_class = node.get('class')
if self.use_latex_toc:
self.topic_class = ''
raise nodes.SkipNode
def depart_topic(self, node):
self.topic_class = ''
self.body.append('\n')
def visit_rubric(self, node):
# self.body.append('\\hfill {\\color{red}\\bfseries{}')
# self.context.append('} \\hfill ~\n')
self.body.append('\\rubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.body.append(self.context.pop())
def visit_transition(self, node):
self.body.append('\n\n')
self.body.append('%' + '_' * 75)
self.body.append('\n\\hspace*{\\fill}\\hrulefill\\hspace*{\\fill}')
self.body.append('\n\n')
def depart_transition(self, node):
#self.body.append('[depart_transition]')
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
self.depart_admonition()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:20 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
......@@ -13,7 +13,7 @@ __docformat__ = 'reStructuredText'
import sys
import docutils
from docutils import nodes, optik, utils
from docutils import nodes, frontend, utils
from docutils.writers import html4css1
......@@ -44,7 +44,7 @@ class Writer(html4css1.Writer):
{'default': '.', 'metavar': '<URL>'}),
# Workaround for SourceForge's broken Python
# (``import random`` causes a segfault).
(optik.SUPPRESS_HELP,
(frontend.SUPPRESS_HELP,
['--no-random'], {'action': 'store_true'}),))
settings_default_overrides = {'footnote_references': 'brackets'}
......
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.2 $
# Date: $Date: 2003/02/01 09:26:20 $
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment