-
|
In case I want to avoid having closing tags for block tokens and rather use indentation, are there any good (easy to understand) examples of how to parse the matched content and figure out proper nesting and wrapping? I wanted to do it by
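To make the goal concrete, the input I have in mind looks roughly like this (the :::info marker is only an illustration): the body of the block is indented, there is no closing :::, and a line back at column 0 simply ends the block.

:::info
    Everything indented here belongs to the info block.
    It can span several lines.

Back at column 0 we are outside the block again.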
-
|
Seems I have finally figured it out. I was on the right track:

const nestTest = {
  name: 'nest-test',
  level: 'block',
  start(src) {
    return src.match(/:::(info|warn|positive|negative)\n/)?.index;
  },
  tokenizer(src, tokens) {
    const match = src.match(/^:::(info|warn|positive|negative)\n/)
    if (!match) {
      return
    }
    const lines = src.split("\n")
    const replaced = []
    replaced.push(lines.shift()) // ignore the opening tag but preserve it for "raw"
    const groups = []
    let group = []
    let indent = 0
    for (let line of lines) {
      if (indent === 0 && line.length === 0) {
        break
      }
      const offset = countIndent(line)
      if (indent > 0 && offset === 0) {
        break
      }
      if (offset !== indent) {
        groups.push(group)
        group = []
        indent = offset
      }
      // keep whitespace-only lines as they are so they still act as blank-line
      // block dividers after join()
      if (" ".repeat(offset) === line) {
        group.push(line)
      } else {
        // strip the block's indentation, but at most 4 spaces of it
        group.push(line.slice(offset < 4 ? offset : 4))
      }
      replaced.push(line)
    }
    if (group.length > 0) {
      // avoid appending an empty trailing line that won't get rendered anyway
      if (group.length > 1 || group[0].length > 0) {
        groups.push(group)
      }
    }
    const token = {
      type: 'flash',
      raw: replaced.join("\n"),
      kind: match[1],
      tokens: []
    };
    for (let group of groups) {
      this.lexer.blockTokens(group.join("\n"), token.tokens);
    }
    return token;
  }
};

// line.search(/[^ ]/) does not yield the expected results here because it
// returns -1 for empty and whitespace-only lines, whereas this helper returns
// the number of leading spaces.
function countIndent(line) {
  let count = 0
  for (let i = 0; i < line.length; i++) {
    if (line.charAt(i) === ' ') {
      count++
    } else {
      break;
    }
  }
  return count
}
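In case it helps anyone else, here is a minimal sketch of how this could be wired up. marked still needs a renderer for the custom 'flash' token type, and renderers are looked up by the token's type, so the renderer extension below is named 'flash' rather than 'nest-test'. The flashRenderer name and the div/class markup are just placeholders I picked for the example, not part of the solution above.

import { marked } from 'marked';

// Renderer-only extension for the 'flash' tokens produced by the tokenizer above.
// Adjust the markup and class names to whatever your site actually uses.
const flashRenderer = {
  name: 'flash',
  renderer(token) {
    // this.parser.parse() renders the child block tokens collected by the tokenizer
    return `<div class="flash flash-${token.kind}">\n${this.parser.parse(token.tokens)}</div>\n`;
  }
};

marked.use({ extensions: [nestTest, flashRenderer] });

const input = [
  ':::info',
  '    Some *emphasised* text inside the block.',
  'Back to normal top-level content.'
].join('\n');

console.log(marked.parse(input));

With that in place, the indented line should end up inside the div, and the column-0 line should come out as a regular paragraph after it.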