grammar.py 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512
  1. """A convenience which constructs expression trees from an easy-to-read syntax
  2. Use this unless you have a compelling reason not to; it performs some
  3. optimizations that would be tedious to do when constructing an expression tree
  4. by hand.
  5. """
  6. from collections import OrderedDict
  7. from textwrap import dedent
  8. from parsimonious.exceptions import BadGrammar, UndefinedLabel
  9. from parsimonious.expressions import (Literal, Regex, Sequence, OneOf,
  10. Lookahead, Quantifier, Optional, ZeroOrMore, OneOrMore, Not, TokenMatcher,
  11. expression, is_callable)
  12. from parsimonious.nodes import NodeVisitor
  13. from parsimonious.utils import evaluate_string
class Grammar(OrderedDict):
    """A collection of rules that describe a language

    You can start parsing from the default rule by calling ``parse()``
    directly on the ``Grammar`` object::

        g = Grammar('''
                    polite_greeting = greeting ", my good " title
                    greeting        = "Hi" / "Hello"
                    title           = "madam" / "sir"
                    ''')
        g.parse('Hello, my good sir')

    Or start parsing from any of the other rules; you can pull them out of the
    grammar as if it were a dictionary::

        g['title'].parse('sir')

    You could also just construct a bunch of ``Expression`` objects yourself
    and stitch them together into a language, but using a ``Grammar`` has some
    important advantages:

    * Languages are much easier to define in the nice syntax it provides.
    * Circular references aren't a pain.
    * It does all kinds of whizzy space- and time-saving optimizations, like
      factoring up repeated subexpressions into a single object, which should
      increase cache hit ratio. [Is this implemented yet?]
    """
    def __init__(self, rules='', **more_rules):
        """Construct a grammar.

        :arg rules: A string of production rules, one per line. The first
            rule listed becomes the :term:`default rule` used by
            :meth:`parse()` and :meth:`match()`; if there are no
            string-based rules, the default rule is None.
        :arg more_rules: Additional kwargs whose names are rule names and
            values are Expressions or custom-coded callables which accomplish
            things the built-in rule syntax cannot. These take precedence over
            ``rules`` in case of naming conflicts.
        """
        # Wrap plain callables in Expression adapters so they can participate
        # in the rule web; objects that are already Expressions pass through.
        decorated_custom_rules = {
            k: (expression(v, k, self) if is_callable(v) else v)
            for k, v in more_rules.items()}

        exprs, first = self._expressions_from_rules(rules, decorated_custom_rules)
        # We ARE the rule mapping: rule name -> Expression.
        super().__init__(exprs.items())
        self.default_rule = first  # may be None

    def default(self, rule_name):
        """Return a new Grammar whose :term:`default rule` is ``rule_name``."""
        new = self._copy()
        new.default_rule = new[rule_name]
        return new

    def _copy(self):
        """Return a shallow copy of myself.

        Deep is unnecessary, since Expression trees are immutable. Subgrammars
        recreate all the Expressions from scratch, and AbstractGrammars have
        no Expressions.

        """
        # __new__ skips __init__ so we don't re-parse any rule syntax; the
        # copy shares the (immutable) Expressions with the original.
        new = Grammar.__new__(Grammar)
        super(Grammar, new).__init__(self.items())
        new.default_rule = self.default_rule
        return new

    def _expressions_from_rules(self, rules, custom_rules):
        """Return a 2-tuple: a dict of rule names pointing to their
        expressions, and then the first rule.

        It's a web of expressions, all referencing each other. Typically,
        there's a single root to the web of references, and that root is the
        starting symbol for parsing, but there's nothing saying you can't have
        multiple roots.

        :arg custom_rules: A map of rule names to custom-coded rules:
            Expressions

        """
        # ``rule_grammar`` is the module-level, bootstrapped grammar for the
        # rule-definition syntax itself (assigned at the bottom of this file).
        tree = rule_grammar.parse(rules)
        return RuleVisitor(custom_rules).visit(tree)

    def parse(self, text, pos=0):
        """Parse some text with the :term:`default rule`.

        :arg pos: The index at which to start parsing

        """
        self._check_default_rule()
        return self.default_rule.parse(text, pos=pos)

    def match(self, text, pos=0):
        """Parse some text with the :term:`default rule` but not necessarily
        all the way to the end.

        :arg pos: The index at which to start parsing

        """
        self._check_default_rule()
        return self.default_rule.match(text, pos=pos)

    def _check_default_rule(self):
        """Raise RuntimeError if there is no default rule defined."""
        if not self.default_rule:
            raise RuntimeError("Can't call parse() on a Grammar that has no "
                               "default rule. Choose a specific rule instead, "
                               "like some_grammar['some_rule'].parse(...).")

    def __str__(self):
        """Return a rule string that, when passed to the constructor, would
        reconstitute the grammar."""
        # Emit the default rule first so round-tripping through the
        # constructor preserves which rule is the default.
        exprs = [self.default_rule] if self.default_rule else []
        exprs.extend(expr for expr in self.values() if
                     expr is not self.default_rule)
        return '\n'.join(expr.as_rule() for expr in exprs)

    def __repr__(self):
        """Return an expression that will reconstitute the grammar."""
        return "Grammar({!r})".format(str(self))
class TokenGrammar(Grammar):
    """A Grammar which takes a list of pre-lexed tokens instead of text

    This is useful if you want to do the lexing yourself, as a separate pass:
    for example, to implement indentation-based languages.

    """
    def _expressions_from_rules(self, rules, custom_rules):
        """Build expressions that match token types rather than characters.

        Same contract as :meth:`Grammar._expressions_from_rules`, but the
        tree is compiled by :class:`TokenRuleVisitor`, whose literals become
        ``TokenMatcher``\\ s and which forbids regexes.
        """
        tree = rule_grammar.parse(rules)
        return TokenRuleVisitor(custom_rules).visit(tree)
class BootstrappingGrammar(Grammar):
    """The grammar used to recognize the textual rules that describe other
    grammars

    This grammar gets its start from some hard-coded Expressions and claws its
    way from there to an expression tree that describes how to parse the
    grammar description syntax.

    """
    def _expressions_from_rules(self, rule_syntax, custom_rules):
        """Return the rules for parsing the grammar definition syntax.

        Return a 2-tuple: a dict of rule names pointing to their expressions,
        and then the top-level expression for the first rule.

        """
        # Hard-code enough of the rules to parse the grammar that describes the
        # grammar description language, to bootstrap:
        comment = Regex(r'#[^\r\n]*', name='comment')
        meaninglessness = OneOf(Regex(r'\s+'), comment, name='meaninglessness')
        _ = ZeroOrMore(meaninglessness, name='_')
        equals = Sequence(Literal('='), _, name='equals')
        label = Sequence(Regex(r'[a-zA-Z_][a-zA-Z_0-9]*'), _, name='label')
        reference = Sequence(label, Not(equals), name='reference')
        quantifier = Sequence(Regex(r'[*+?]'), _, name='quantifier')
        # This pattern supports empty literals. TODO: A problem?
        spaceless_literal = Regex(r'u?r?"[^"\\]*(?:\\.[^"\\]*)*"',
                                  ignore_case=True,
                                  dot_all=True,
                                  name='spaceless_literal')
        literal = Sequence(spaceless_literal, _, name='literal')
        regex = Sequence(Literal('~'),
                         literal,
                         Regex('[ilmsuxa]*', ignore_case=True),
                         _,
                         name='regex')
        atom = OneOf(reference, literal, regex, name='atom')
        quantified = Sequence(atom, quantifier, name='quantified')

        term = OneOf(quantified, atom, name='term')
        not_term = Sequence(Literal('!'), term, _, name='not_term')
        # Splice not_term in ahead of term's existing alternatives. Mutating
        # ``members`` after construction is how we close the circular
        # reference between term and not_term.
        term.members = (not_term,) + term.members

        sequence = Sequence(term, OneOrMore(term), name='sequence')
        or_term = Sequence(Literal('/'), _, term, name='or_term')
        ored = Sequence(term, OneOrMore(or_term), name='ored')
        # NOTE: this local deliberately shadows the imported ``expression``
        # helper for the rest of this method.
        expression = OneOf(ored, sequence, term, name='expression')
        rule = Sequence(label, equals, expression, name='rule')
        rules = Sequence(_, OneOrMore(rule), name='rules')

        # Use those hard-coded rules to parse the (more extensive) rule syntax.
        # (For example, unless I start using parentheses in the rule language
        # definition itself, I should never have to hard-code expressions for
        # those above.)
        rule_tree = rules.parse(rule_syntax)

        # Turn the parse tree into a map of expressions:
        return RuleVisitor().visit(rule_tree)
# The grammar for parsing PEG grammar definitions:
# This is a nice, simple grammar. We may someday add to it, but it's a safe bet
# that the future will always be a superset of this.
# (The comments *inside* the string below are part of the rule syntax itself —
# the ``comment`` rule makes the parser skip them.)
rule_syntax = (r'''
# Ignored things (represented by _) are typically hung off the end of the
# leafmost kinds of nodes. Literals like "/" count as leaves.
rules = _ rule*
rule = label equals expression
equals = "=" _
literal = spaceless_literal _
# So you can't spell a regex like `~"..." ilm`:
spaceless_literal = ~"u?r?b?\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\""is /
~"u?r?b?'[^'\\\\]*(?:\\\\.[^'\\\\]*)*'"is
expression = ored / sequence / term
or_term = "/" _ term
ored = term or_term+
sequence = term term+
not_term = "!" term _
lookahead_term = "&" term _
term = not_term / lookahead_term / quantified / atom
quantified = atom quantifier
atom = reference / literal / regex / parenthesized
regex = "~" spaceless_literal ~"[ilmsuxa]*"i _
parenthesized = "(" _ expression ")" _
quantifier = ~r"[*+?]|\{\d*,\d+\}|\{\d+,\d*\}|\{\d+\}" _
reference = label !equals
# A subsequent equal sign is the only thing that distinguishes a label
# (which begins a new rule) from a reference (which is just a pointer to a
# rule defined somewhere else):
label = ~"[a-zA-Z_][a-zA-Z_0-9]*(?![\"'])" _
# _ = ~r"\s*(?:#[^\r\n]*)?\s*"
_ = meaninglessness*
meaninglessness = ~r"\s+" / comment
comment = ~r"#[^\r\n]*"
''')
  203. class LazyReference(str):
  204. """A lazy reference to a rule, which we resolve after grokking all the
  205. rules"""
  206. name = ''
  207. def resolve_refs(self, rule_map):
  208. """
  209. Traverse the rule map following top-level lazy references,
  210. until we reach a cycle (raise an error) or a concrete expression.
  211. For example, the following is a circular reference:
  212. foo = bar
  213. baz = foo2
  214. foo2 = foo
  215. Note that every RHS of a grammar rule _must_ be either a
  216. LazyReference or a concrete expression, so the reference chain will
  217. eventually either terminate or find a cycle.
  218. """
  219. seen = set()
  220. cur = self
  221. while True:
  222. if cur in seen:
  223. raise BadGrammar(f"Circular Reference resolving {self.name}={self}.")
  224. else:
  225. seen.add(cur)
  226. try:
  227. cur = rule_map[str(cur)]
  228. except KeyError:
  229. raise UndefinedLabel(cur)
  230. if not isinstance(cur, LazyReference):
  231. return cur
  232. # Just for debugging:
  233. def _as_rhs(self):
  234. return '<LazyReference to %s>' % self
class RuleVisitor(NodeVisitor):
    """Turns a parse tree of a grammar definition into a map of ``Expression``
    objects

    This is the magic piece that breathes life into a parsed bunch of parse
    rules, allowing them to go forth and parse other things.

    """
    # Maps a single-char quantifier symbol to the Expression class
    # implementing it; counted repetitions ({m,n}) are handled separately.
    quantifier_classes = {'?': Optional, '*': ZeroOrMore, '+': OneOrMore}

    # These productions just wrap a single child; hoist the child up.
    visit_expression = visit_term = visit_atom = NodeVisitor.lift_child

    def __init__(self, custom_rules=None):
        """Construct.

        :arg custom_rules: A dict of {rule name: expression} holding custom
            rules which will take precedence over the others

        """
        self.custom_rules = custom_rules or {}
        # (node, type) of the most recent string literal seen, so we can
        # reject grammars that mix str and bytes literals.
        self._last_literal_node_and_type = None

    def visit_parenthesized(self, node, parenthesized):
        """Treat a parenthesized subexpression as just its contents.

        Its position in the tree suffices to maintain its grouping semantics.

        """
        left_paren, _, expression, right_paren, _ = parenthesized
        return expression

    def visit_quantifier(self, node, quantifier):
        """Turn a quantifier into just its symbol-matching node."""
        symbol, _ = quantifier
        return symbol

    def visit_quantified(self, node, quantified):
        """Wrap the atom in the Expression its quantifier calls for.

        ``?``/``*``/``+`` map straight through ``quantifier_classes``; a
        counted ``{m}``, ``{m,}``, ``{,n}``, or ``{m,n}`` form falls into the
        generic ``Quantifier`` branch.
        """
        atom, quantifier = quantified
        try:
            return self.quantifier_classes[quantifier.text](atom)
        except KeyError:
            # This should pass: assert re.full_match("\{(\d*)(,(\d*))?\}", quantifier)
            quantifier = quantifier.text[1:-1].split(",")
            if len(quantifier) == 1:
                # {m}: exactly m repetitions.
                min_match = max_match = int(quantifier[0])
            else:
                # {m,n}: missing m means 0; missing n means unbounded.
                min_match = int(quantifier[0]) if quantifier[0] else 0
                max_match = int(quantifier[1]) if quantifier[1] else float('inf')
            return Quantifier(atom, min=min_match, max=max_match)

    def visit_lookahead_term(self, node, lookahead_term):
        """Turn an ``&``-prefixed term into a non-consuming Lookahead."""
        ampersand, term, _ = lookahead_term
        return Lookahead(term)

    def visit_not_term(self, node, not_term):
        """Turn a ``!``-prefixed term into a negative lookahead."""
        exclamation, term, _ = not_term
        return Not(term)

    def visit_rule(self, node, rule):
        """Assign a name to the Expression and return it."""
        label, equals, expression = rule
        expression.name = label  # Assign a name to the expr.
        return expression

    def visit_sequence(self, node, sequence):
        """A parsed Sequence looks like [term node, OneOrMore node of
        ``another_term``s]. Flatten it out."""
        term, other_terms = sequence
        return Sequence(term, *other_terms)

    def visit_ored(self, node, ored):
        """Flatten a ``/``-separated chain into one OneOf expression."""
        first_term, other_terms = ored
        return OneOf(first_term, *other_terms)

    def visit_or_term(self, node, or_term):
        """Return just the term from an ``or_term``.

        We already know it's going to be ored, from the containing ``ored``.

        """
        slash, _, term = or_term
        return term

    def visit_label(self, node, label):
        """Turn a label into a unicode string."""
        name, _ = label
        return name.text

    def visit_reference(self, node, reference):
        """Stick a :class:`LazyReference` in the tree as a placeholder.

        We resolve them all later.

        """
        label, not_equals = reference
        return LazyReference(label)

    def visit_regex(self, node, regex):
        """Return a ``Regex`` expression."""
        tilde, literal, flags, _ = regex
        flags = flags.text.upper()
        pattern = literal.literal  # Pull the string back out of the Literal
                                   # object.
        return Regex(pattern, ignore_case='I' in flags,
                     locale='L' in flags,
                     multiline='M' in flags,
                     dot_all='S' in flags,
                     unicode='U' in flags,
                     verbose='X' in flags,
                     ascii='A' in flags)

    def visit_spaceless_literal(self, spaceless_literal, visited_children):
        """Turn a string literal into a ``Literal`` that recognizes it.

        Raises ``BadGrammar`` if this literal's Python type (str vs. bytes)
        differs from that of an earlier literal in the same grammar.
        """
        literal_value = evaluate_string(spaceless_literal.text)
        if self._last_literal_node_and_type:
            last_node, last_type = self._last_literal_node_and_type
            if last_type != type(literal_value):
                raise BadGrammar(dedent(f"""\
                    Found {last_node.text} ({last_type}) and {spaceless_literal.text} ({type(literal_value)}) string literals.
                    All strings in a single grammar must be of the same type.
                    """)
                )

        self._last_literal_node_and_type = spaceless_literal, type(literal_value)

        return Literal(literal_value)

    def visit_literal(self, node, literal):
        """Pick just the literal out of a literal-and-junk combo."""
        spaceless_literal, _ = literal
        return spaceless_literal

    def generic_visit(self, node, visited_children):
        """Replace childbearing nodes with a list of their children; keep
        others untouched.

        For our case, if a node has children, only the children are important.
        Otherwise, keep the node around for (for example) the flags of the
        regex rule. Most of these kept-around nodes are subsequently thrown
        away by the other visitor methods.

        We can't simply hang the visited children off the original node; that
        would be disastrous if the node occurred in more than one place in the
        tree.

        """
        return visited_children or node  # should semantically be a tuple

    def visit_rules(self, node, rules_list):
        """Collate all the rules into a map. Return (map, default rule).

        The default rule is the first one. Or, if you have more than one rule
        of that name, it's the last-occurring rule of that name. (This lets you
        override the default rule when you extend a grammar.) If there are no
        string-based rules, the default rule is None, because the custom rules,
        due to being kwarg-based, are unordered.

        """
        _, rules = rules_list

        # Map each rule's name to its Expression. Later rules of the same name
        # override earlier ones. This lets us define rules multiple times and
        # have the last declaration win, so you can extend grammars by
        # concatenation.
        rule_map = OrderedDict((expr.name, expr) for expr in rules)

        # And custom rules override string-based rules. This is the least
        # surprising choice when you compare the dict constructor:
        # dict({'x': 5}, x=6).
        rule_map.update(self.custom_rules)

        # Resolve references. This tolerates forward references.
        for name, rule in list(rule_map.items()):
            if hasattr(rule, 'resolve_refs'):
                # Some custom rules may not define a resolve_refs method,
                # though anything that inherits from Expression will have it.
                rule_map[name] = rule.resolve_refs(rule_map)

        # isinstance() is a temporary hack around the fact that * rules don't
        # always get transformed into lists by NodeVisitor. We should fix that;
        # it's surprising and requires writing lame branches like this.
        return rule_map, (rule_map[rules[0].name]
                          if isinstance(rules, list) and rules else None)
  379. class TokenRuleVisitor(RuleVisitor):
  380. """A visitor which builds expression trees meant to work on sequences of
  381. pre-lexed tokens rather than strings"""
  382. def visit_spaceless_literal(self, spaceless_literal, visited_children):
  383. """Turn a string literal into a ``TokenMatcher`` that matches
  384. ``Token`` objects by their ``type`` attributes."""
  385. return TokenMatcher(evaluate_string(spaceless_literal.text))
  386. def visit_regex(self, node, regex):
  387. tilde, literal, flags, _ = regex
  388. raise BadGrammar('Regexes do not make sense in TokenGrammars, since '
  389. 'TokenGrammars operate on pre-lexed tokens rather '
  390. 'than characters.')
# Bootstrap to level 1: parse the rule syntax with hand-built Expressions...
rule_grammar = BootstrappingGrammar(rule_syntax)
# ...and then to level 2. This establishes that the node tree of our rule
# syntax is built by the same machinery that will build trees of our users'
# grammars. And the correctness of that tree is tested, indirectly, in
# test_grammar. (Order matters: Grammar._expressions_from_rules reads the
# module-level ``rule_grammar`` bound on the previous line.)
rule_grammar = Grammar(rule_syntax)


# TODO: Teach Expression trees how to spit out Python representations of
# themselves. Then we can just paste that in above, and we won't have to
# bootstrap on import. Though it'll be a little less DRY. [Ah, but this is not
# so clean, because it would have to output multiple statements to get multiple
# refs to a single expression hooked up.]