projects
/
matthijs
/
master-project
/
report.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Add the ->> symbol to the lambda prettyprinter.
[matthijs/master-project/report.git]
/
pret-lam.lua
diff --git a/pret-lam.lua b/pret-lam.lua
index f8ddb676088a7870fd94a979276d3283aa7b7dd6..25f4f7a40c173f0a00e573e01434788f7ea0683c 100644
(file)
--- a/pret-lam.lua
+++ b/pret-lam.lua
@@ -19,17 +19,18 @@
local colors = {
local symbols = {
-- Note, the space we replace with is a Unicode non-breaking space
-- (U+00A0).
local symbols = {
-- Note, the space we replace with is a Unicode non-breaking space
-- (U+00A0).
- [' '] = {repr = ' '},
- ['_'] = {repr = '\\_'},
- ['->'] = {repr = '→'},
- ['=>'] = {repr = '⇒'},
+ {symbol = ' ', repr = ' '},
+ {symbol = '_', repr = '\\_'},
+ {symbol = '->>', repr = '\\twoheadrightarrow'},
+ {symbol = '->', repr = '→'},
+ {symbol = '=>', repr = '⇒'},
-- The default * sits very high above the baseline, \ast (u+2217) looks
-- better.
-- The default * sits very high above the baseline, \ast (u+2217) looks
-- better.
-    ['*'] = {repr = '\\ast'},
-    ['~'] = {repr = '\\HDLine[width=.20 * \\the\\textwidth]'},
-    ['|'] = {repr = '\\char' .. utf.byte('|')},
+    {symbol = '*', repr = '\\ast'},
+    {symbol = '~', repr = '\\HDLine[width=.20 * \\the\\textwidth]'},
+    {symbol = '|', repr = '\\char' .. utf.byte('|')},
-- Use ▶ from our roman font, since Iwona doesn't have the glyph
-- Use ▶ from our roman font, since Iwona doesn't have the glyph
-    ['▶'] = {repr = '{\\rm{}▶}'},
+    {symbol = '▶', repr = '{\\rm{}▶}'},
}
-- Keywords that should be bold
}
-- Keywords that should be bold
@@ -63,9 +64,9 @@
end
-- character. We can do a lookup directly, since symbols can be different in
-- length, so we just loop over all symbols, trying them in turn.
local function take_symbol(str)
-- character. We can do a lookup directly, since symbols can be different in
-- length, so we just loop over all symbols, trying them in turn.
local function take_symbol(str)
-    for symbol,props in pairs(symbols) do
+    for i,props in ipairs(symbols) do
-- Try to remove symbol from the start of str
-- Try to remove symbol from the start of str
- symbol, newstr = utf.match(str, "^(" .. symbol .. ")(.*)")
+        symbol, newstr = utf.match(str, "^(" .. props.symbol .. ")(.*)")
if symbol then
-- Return this tokens repr, or just the token if it has no
-- repr.
if symbol then
-- Return this tokens repr, or just the token if it has no
-- repr.
@@ -221,12 +222,16 @@
vis.begin_of_inline = vis.begin_of_display
vis.end_of_inline = vis.end_of_display
function vis.flush_line(str,nested)
vis.end_of_inline = vis.end_of_display
function vis.flush_line(str,nested)
- buffers.flush_result(vis.do_line(str), nested)
+    buffers.flush_result(vis.do_line(str, false), nested)
end
end
-function vis.do_line(str)
- result = {}
- str = do_indent(str)
+function vis.do_line(str, no_indent)
+ local result = {}
+ if not no_indent then
+ -- Allow ignore of the indentation stuff when we're calling ourselves
+ -- for a partial line.
+ str = do_indent(str)
+ end
while str ~= "" do
local found = false
local word, symbol
while str ~= "" do
local found = false
local word, symbol
@@ -234,10 +239,17 @@
function vis.do_line(str)
if text then
table.insert(result, '\\strikethrough{')
-- Recursively call ourselves to handle spaces gracefully.
if text then
table.insert(result, '\\strikethrough{')
-- Recursively call ourselves to handle spaces gracefully.
- result = array_concat(result, vis.do_line(text))
+        result = array_concat(result, vis.do_line(text, true))
table.insert(result, '}')
-- Eat the processed characters
str = rest
table.insert(result, '}')
-- Eat the processed characters
str = rest
+ elseif utf.match(str, "^%-%-") then
+ table.insert(result, '{\\italic{--')
+ -- Recursively call ourselves to handle spaces gracefully.
+ result = array_concat(result, vis.do_line(utf.sub(str, 3), true))
+ table.insert(result, '}}')
+ -- Done with this line
+ str = ''
else
-- See if the next token is a word
word, str = take_word(str)
else
-- See if the next token is a word
word, str = take_word(str)