diff --git a/src/Lexer.js b/src/Lexer.js adds linetracking to marked.js v4.3.0; add data-ln="%d" to most tags, %d is the source markdown line --- a/src/Lexer.js +++ b/src/Lexer.js @@ -52,4 +52,5 @@ function mangle(text) { export class Lexer { constructor(options) { + this.ln = 1; // like most editors, start counting from 1 this.tokens = []; this.tokens.links = Object.create(null); @@ -128,4 +129,15 @@ export class Lexer { } + set_ln(token, ln = this.ln) { + // assigns ln (the current line number) to the token, + // then bumps this.ln by the number of newlines in the contents + // + // if ln is set, also assigns the line counter to a new value + // (usually a backup value from before a call into a subparser + // which bumped the linecounter by a subset of the newlines) + token.ln = ln; + this.ln = ln + (token.raw.match(/\n/g) || []).length; + } + /** * Lexing @@ -140,7 +152,11 @@ export class Lexer { } - let token, lastToken, cutSrc, lastParagraphClipped; + let token, lastToken, cutSrc, lastParagraphClipped, ln; while (src) { + // this.ln will be bumped by recursive calls into this func; + // reset the count and rely on the outermost token's raw only + ln = this.ln; + if (this.options.extensions && this.options.extensions.block @@ -148,4 +164,5 @@ export class Lexer { if (token = extTokenizer.call({ lexer: this }, src, tokens)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); return true; @@ -159,4 +176,5 @@ export class Lexer { if (token = this.tokenizer.space(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); // is \n if not type if (token.raw.length === 1 && tokens.length > 0) { // if there's a single \n as a spacer, it's terminating the last line, @@ -172,4 +190,5 @@ export class Lexer { if (token = this.tokenizer.code(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); lastToken = tokens[tokens.length - 1]; // An indented code block cannot interrupt a paragraph. 
@@ -187,4 +206,5 @@ export class Lexer { if (token = this.tokenizer.fences(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -194,4 +214,5 @@ export class Lexer { if (token = this.tokenizer.heading(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -201,4 +222,5 @@ export class Lexer { if (token = this.tokenizer.hr(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -208,4 +230,5 @@ export class Lexer { if (token = this.tokenizer.blockquote(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -215,4 +238,5 @@ export class Lexer { if (token = this.tokenizer.list(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -222,4 +246,5 @@ export class Lexer { if (token = this.tokenizer.html(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -229,4 +254,5 @@ export class Lexer { if (token = this.tokenizer.def(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); lastToken = tokens[tokens.length - 1]; if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) { @@ -246,4 +272,5 @@ export class Lexer { if (token = this.tokenizer.table(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -253,4 +280,5 @@ export class Lexer { if (token = this.tokenizer.lheading(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); tokens.push(token); continue; @@ -273,4 +301,5 @@ export class Lexer { } if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) { + this.set_ln(token, ln); lastToken = tokens[tokens.length - 1]; if (lastParagraphClipped && lastToken.type === 'paragraph') { @@ -290,4 +319,6 @@ export class Lexer { if (token = 
this.tokenizer.text(src)) { src = src.substring(token.raw.length); + this.set_ln(token, ln); + this.ln++; lastToken = tokens[tokens.length - 1]; if (lastToken && lastToken.type === 'text') { @@ -367,4 +398,5 @@ export class Lexer { if (token = extTokenizer.call({ lexer: this }, src, tokens)) { src = src.substring(token.raw.length); + this.ln = token.ln || this.ln; tokens.push(token); return true; @@ -432,4 +464,6 @@ export class Lexer { if (token = this.tokenizer.br(src)) { src = src.substring(token.raw.length); + // no need to reset (no more blockTokens anyways) + token.ln = this.ln++; tokens.push(token); continue; @@ -474,4 +508,5 @@ export class Lexer { if (token = this.tokenizer.inlineText(cutSrc, smartypants)) { src = src.substring(token.raw.length); + this.ln = token.ln || this.ln; if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started prevChar = token.raw.slice(-1); diff --git a/src/Parser.js b/src/Parser.js index a22a2bc..884ad66 100644 --- a/src/Parser.js +++ b/src/Parser.js @@ -18,4 +18,5 @@ export class Parser { this.textRenderer = new TextRenderer(); this.slugger = new Slugger(); + this.ln = 0; // error indicator; should always be set >=1 from tokens } @@ -64,4 +65,8 @@ export class Parser { for (i = 0; i < l; i++) { token = tokens[i]; + // take line-numbers from tokens whenever possible + // and update the renderer's html attribute with the new value + this.ln = token.ln || this.ln; + this.renderer.tag_ln(this.ln); // Run any renderer extensions @@ -124,7 +129,10 @@ export class Parser { } - body += this.renderer.tablerow(cell); + // the +2 is to skip the table header + body += this.renderer.tag_ln(token.ln + j + 2).tablerow(cell); } - out += this.renderer.table(header, body); + // the html attribute is now at the end of the table, + // reset it before writing the