diff --git a/src/Lexer.js b/src/Lexer.js adds line tracking to marked.js v3.0.4; add data-ln="%d" to most tags, %d is the source markdown line --- a/src/Lexer.js +++ b/src/Lexer.js @@ -50,4 +50,5 @@ function mangle(text) { module.exports = class Lexer { constructor(options) { + this.ln = 1; // like most editors, start counting from 1 this.tokens = []; this.tokens.links = Object.create(null); @@ -127,4 +128,15 @@ module.exports = class Lexer { } + set_ln(token, ln = this.ln) { + // assigns ln (the current line number) to the token, + // then bumps this.ln by the number of newlines in the contents + // + // if ln is set, also assigns the line counter to a new value + // (usually a backup value from before a call into a subparser + // which bumped the line counter by a subset of the newlines) + token.ln = ln; + this.ln = ln + (token.raw.match(/\n/g) || []).length; + } + /** * Lexing @@ -134,7 +146,11 @@ module.exports = class Lexer { src = src.replace(/^ +$/gm, ''); } - let token, lastToken, cutSrc, lastParagraphClipped; + let token, lastToken, cutSrc, lastParagraphClipped, ln; while (src) { + // this.ln will be bumped by recursive calls into this func; + // reset the count and rely on the outermost token's raw only + ln = this.ln; + if (this.options.extensions && this.options.extensions.block @@ -142,4 +158,5 @@ module.exports = class Lexer { if (token = extTokenizer.call({ lexer: this }, src, tokens)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); return true; @@ -153,4 +170,5 @@ module.exports = class Lexer { if (token = this.tokenizer.space(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); // is \n if not type if (token.type) { tokens.push(token); @@ -162,4 +180,5 @@ module.exports = class Lexer { if (token = this.tokenizer.code(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); lastToken = tokens[tokens.length - 1]; // An indented code block cannot interrupt a paragraph. 
@@ -177,4 +196,5 @@ module.exports = class Lexer { if (token = this.tokenizer.fences(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -184,4 +204,5 @@ module.exports = class Lexer { if (token = this.tokenizer.heading(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -191,4 +212,5 @@ module.exports = class Lexer { if (token = this.tokenizer.hr(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -198,4 +220,5 @@ module.exports = class Lexer { if (token = this.tokenizer.blockquote(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -205,4 +228,5 @@ module.exports = class Lexer { if (token = this.tokenizer.list(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -212,4 +236,5 @@ module.exports = class Lexer { if (token = this.tokenizer.html(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -219,4 +244,5 @@ module.exports = class Lexer { if (token = this.tokenizer.def(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); lastToken = tokens[tokens.length - 1]; if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) { @@ -236,4 +262,5 @@ module.exports = class Lexer { if (token = this.tokenizer.table(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -243,4 +270,5 @@ module.exports = class Lexer { if (token = this.tokenizer.lheading(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -263,4 +291,5 @@ module.exports = class Lexer { } if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) { + this.set_ln(token, ln); lastToken = tokens[tokens.length - 1]; if (lastParagraphClipped && 
lastToken.type === 'paragraph') { @@ -280,4 +309,6 @@ module.exports = class Lexer { if (token = this.tokenizer.text(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); + this.ln++; lastToken = tokens[tokens.length - 1]; if (lastToken && lastToken.type === 'text') { @@ -355,4 +386,5 @@ module.exports = class Lexer { if (token = extTokenizer.call({ lexer: this }, src, tokens)) { src = src.substring(token.raw.length); + this.ln = token.ln || this.ln; tokens.push(token); return true; @@ -420,4 +452,6 @@ module.exports = class Lexer { if (token = this.tokenizer.br(src)) { src = src.substring(token.raw.length); + // no need to reset (no more blockTokens anyways) + token.ln = this.ln++; tokens.push(token); continue; @@ -462,4 +496,5 @@ module.exports = class Lexer { if (token = this.tokenizer.inlineText(cutSrc, smartypants)) { src = src.substring(token.raw.length); + this.ln = token.ln || this.ln; if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started prevChar = token.raw.slice(-1); diff --git a/src/Parser.js b/src/Parser.js --- a/src/Parser.js +++ b/src/Parser.js @@ -18,4 +18,5 @@ module.exports = class Parser { this.textRenderer = new TextRenderer(); this.slugger = new Slugger(); + this.ln = 0; // error indicator; should always be set >=1 from tokens } @@ -64,4 +65,8 @@ module.exports = class Parser { for (i = 0; i < l; i++) { token = tokens[i]; + // take line-numbers from tokens whenever possible + // and update the renderer's html attribute with the new value + this.ln = token.ln || this.ln; + this.renderer.tag_ln(this.ln); // Run any renderer extensions @@ -124,7 +129,10 @@ module.exports = class Parser { } - body += this.renderer.tablerow(cell); + // the +2 is to skip the table header + body += this.renderer.tag_ln(token.ln + j + 2).tablerow(cell); } - out += this.renderer.table(header, body); + // the html attribute is now at the end of the table, + // reset it before writing the