diff --git a/src/Lexer.js b/src/Lexer.js
adds line tracking to marked.js v1.0.0;
adds data-ln="%d" to most tags, where %d is the source markdown line
--- a/src/Lexer.js
+++ b/src/Lexer.js
@@ -49,4 +49,5 @@ function mangle(text) {
 module.exports = class Lexer {
   constructor(options) {
+    this.ln = 1; // like most editors, start counting from 1
     this.tokens = [];
     this.tokens.links = Object.create(null);
@@ -108,4 +109,15 @@ module.exports = class Lexer {
   }
 
+  set_ln(token, ln = this.ln) {
+    // assigns ln (the current line number) to the token,
+    // then bumps this.ln by the number of newlines in the contents
+    //
+    // if ln is set, also assigns the line counter to a new value
+    // (usually a backup value from before a call into a subparser
+    // which bumped the linecounter by a subset of the newlines)
+    token.ln = ln;
+    this.ln = ln + (token.raw.match(/\n/g) || []).length;
+  }
+
   /**
    * Lexing
@@ -113,10 +125,15 @@ module.exports = class Lexer {
   blockTokens(src, tokens = [], top = true) {
     src = src.replace(/^ +$/gm, '');
-    let token, i, l;
+    let token, i, l, ln;
 
     while (src) {
+      // this.ln will be bumped by recursive calls into this func;
+      // reset the count and rely on the outermost token's raw only
+      ln = this.ln;
+
       // newline
       if (token = this.tokenizer.space(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token); // is \n if not type
         if (token.type) {
           tokens.push(token);
@@ -128,4 +145,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.code(src, tokens)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -135,4 +153,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.fences(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -142,4 +161,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.heading(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -149,4 +169,5 @@ module.exports = class Lexer {
      if (token = this.tokenizer.nptable(src)) {
        src = src.substring(token.raw.length);
+        this.set_ln(token);
        tokens.push(token);
        continue;
@@ -156,4 +177,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.hr(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -164,4 +186,7 @@ module.exports = class Lexer {
         src = src.substring(token.raw.length);
         token.tokens = this.blockTokens(token.text, [], top);
+        // recursive call to blockTokens probably bumped this.ln,
+        // token.raw is more reliable so reset this.ln and use that
+        this.set_ln(token, ln);
         tokens.push(token);
         continue;
@@ -174,5 +199,9 @@ module.exports = class Lexer {
         for (i = 0; i < l; i++) {
           token.items[i].tokens = this.blockTokens(token.items[i].text, [], false);
+          // list entries don't bump the linecounter, so let's
+          this.ln++;
         }
+        // then reset like blockquote
+        this.set_ln(token, ln);
         tokens.push(token);
         continue;
@@ -182,4 +211,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.html(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -189,4 +219,5 @@ module.exports = class Lexer {
       if (top && (token = this.tokenizer.def(src))) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         if (!this.tokens.links[token.tag]) {
           this.tokens.links[token.tag] = {
@@ -201,4 +232,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.table(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -208,4 +240,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.lheading(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -215,4 +248,5 @@ module.exports = class Lexer {
       if (top && (token = this.tokenizer.paragraph(src))) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -222,4 +256,5 @@ module.exports = class Lexer {
       if (token = this.tokenizer.text(src)) {
         src = src.substring(token.raw.length);
+        this.set_ln(token);
         tokens.push(token);
         continue;
@@ -251,4 +286,7 @@ module.exports = class Lexer {
     for (i = 0; i < l; i++) {
       token = tokens[i];
+      // this.ln is at EOF when inline() is invoked;
+      // all this affects <br/> tags only so no biggie if it breaks
+      this.ln = token.ln || this.ln;
       switch (token.type) {
         case 'paragraph':
@@ -374,4 +412,6 @@ module.exports = class Lexer {
       if (token = this.tokenizer.br(src)) {
         src = src.substring(token.raw.length);
+        // no need to reset (no more blockTokens anyways)
+        token.ln = this.ln++;
         tokens.push(token);
         continue;
diff --git a/src/Parser.js b/src/Parser.js
index bad3ac7..882da47 100644
--- a/src/Parser.js
+++ b/src/Parser.js
@@ -18,4 +18,5 @@ module.exports = class Parser {
     this.textRenderer = new TextRenderer();
     this.slugger = new Slugger();
+    this.ln = 0; // error indicator; should always be set >=1 from tokens
   }
 
@@ -55,4 +56,9 @@ module.exports = class Parser {
     for (i = 0; i < l; i++) {
       token = tokens[i];
+      // take line-numbers from tokens whenever possible
+      // and update the renderer's html attribute with the new value
+      this.ln = token.ln || this.ln;
+      this.renderer.tag_ln(this.ln);
+
       switch (token.type) {
         case 'space': {
@@ -105,7 +111,10 @@ module.exports = class Parser {
             }
 
-            body += this.renderer.tablerow(cell);
+            // the +2 is to skip the table header
+            body += this.renderer.tag_ln(token.ln + j + 2).tablerow(cell);
           }
-          out += this.renderer.table(header, body);
+          // the html attribute is now at the end of the table,
+          // reset it before writing the <table> tag now
+          out += this.renderer.tag_ln(token.ln).table(header, body);
           continue;
         }
@@ -148,8 +157,12 @@ module.exports = class Parser {
             itemBody += this.parse(item.tokens, loose);
-            body += this.renderer.listitem(itemBody, task, checked);
+            // similar to tables, writing contents before the
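
Note (editor's sketch, not part of the patch): the Parser hunks above call this.renderer.tag_ln(...), but the Renderer.js side of the diff is not included in this excerpt. The following is a minimal sketch of how a tag_ln() helper could stamp data-ln="%d" onto the emitted tags when paired with the patched Lexer/Parser; apart from the stock marked v1.0.0 renderer API (marked.Renderer, heading, paragraph), the class name, the stamp() helper, and the attribute-injection approach are assumptions, not what the real patch does.

// sketch only: a renderer stand-in for the Renderer.js changes not shown here
const marked = require('marked');

class LineRenderer extends marked.Renderer {
  constructor(options) {
    super(options);
    this.ln = 0; // last source line handed over by the (patched) Parser
  }

  // remember the current source line; returning `this` allows the chained
  // calls used in the patch, e.g. renderer.tag_ln(ln).tablerow(cell)
  tag_ln(n) {
    this.ln = n;
    return this;
  }

  // inject data-ln right after the opening "<tag" of the rendered html
  stamp(html) {
    return html.replace(/^<(\w+)/, '<$1 data-ln="' + this.ln + '"');
  }

  heading(text, level, raw, slugger) {
    return this.stamp(super.heading(text, level, raw, slugger));
  }

  paragraph(text) {
    return this.stamp(super.paragraph(text));
  }
}

// usage (requires the patched build above; the stock Parser never calls
// tag_ln): block tags then carry their source markdown line
const html = marked('# title\n\nhello world\n', { renderer: new LineRenderer() });
// expected shape: <h1 data-ln="1" ...>title</h1> ... <p data-ln="3">hello world</p>

With every block-level tag carrying its source line, a client can map a position in the markdown editor to the matching preview element, e.g. document.querySelector('[data-ln="42"]'), which is what the data-ln="%d" attributes exist for.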