mirror of
https://github.com/9001/copyparty.git
synced 2025-08-17 09:02:15 -06:00
302 lines
11 KiB
Diff
302 lines
11 KiB
Diff
diff --git a/src/Lexer.js b/src/Lexer.js
|
|
adds linetracking to marked.js v4.0.6;
|
|
add data-ln="%d" to most tags, %d is the source markdown line
|
|
--- a/src/Lexer.js
|
|
+++ b/src/Lexer.js
|
|
@@ -50,4 +50,5 @@ function mangle(text) {
|
|
export class Lexer {
|
|
constructor(options) {
|
|
+ this.ln = 1; // like most editors, start counting from 1
|
|
this.tokens = [];
|
|
this.tokens.links = Object.create(null);
|
|
@@ -127,4 +128,15 @@ export class Lexer {
|
|
}
|
|
|
|
+ set_ln(token, ln = this.ln) {
|
|
+ // assigns ln (the current line number) to the token,
|
|
+ // then bump this.ln by the number of newlines in the contents
|
|
+ //
|
|
+ // if ln is set, also assigns the line counter to a new value
|
|
+ // (usually a backup value from before a call into a subparser
|
|
+ // which bumped the linecounter by a subset of the newlines)
|
|
+ token.ln = ln;
|
|
+ this.ln = ln + (token.raw.match(/\n/g) || []).length;
|
|
+ }
|
|
+
|
|
/**
|
|
* Lexing
|
|
@@ -134,7 +146,11 @@ export class Lexer {
|
|
src = src.replace(/^ +$/gm, '');
|
|
}
|
|
- let token, lastToken, cutSrc, lastParagraphClipped;
|
|
+ let token, lastToken, cutSrc, lastParagraphClipped, ln;
|
|
|
|
while (src) {
|
|
+ // this.ln will be bumped by recursive calls into this func;
|
|
+ // reset the count and rely on the outermost token's raw only
|
|
+ ln = this.ln;
|
|
+
|
|
if (this.options.extensions
|
|
&& this.options.extensions.block
|
|
@@ -142,4 +158,5 @@ export class Lexer {
|
|
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
return true;
|
|
@@ -153,4 +170,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.space(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln); // is \n if not type
|
|
if (token.type) {
|
|
tokens.push(token);
|
|
@@ -162,4 +180,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.code(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
lastToken = tokens[tokens.length - 1];
|
|
// An indented code block cannot interrupt a paragraph.
|
|
@@ -177,4 +196,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.fences(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -184,4 +204,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.heading(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -191,4 +212,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.hr(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -198,4 +220,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.blockquote(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -205,4 +228,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.list(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -212,4 +236,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.html(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -219,4 +244,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.def(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
lastToken = tokens[tokens.length - 1];
|
|
if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
|
|
@@ -236,4 +262,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.table(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -243,4 +270,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.lheading(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -263,4 +291,5 @@ export class Lexer {
|
|
}
|
|
if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) {
|
|
+ this.set_ln(token, ln);
|
|
lastToken = tokens[tokens.length - 1];
|
|
if (lastParagraphClipped && lastToken.type === 'paragraph') {
|
|
@@ -280,4 +309,6 @@ export class Lexer {
|
|
if (token = this.tokenizer.text(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.set_ln(token, ln);
|
|
+ this.ln++;
|
|
lastToken = tokens[tokens.length - 1];
|
|
if (lastToken && lastToken.type === 'text') {
|
|
@@ -355,4 +386,5 @@ export class Lexer {
|
|
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.ln = token.ln || this.ln;
|
|
tokens.push(token);
|
|
return true;
|
|
@@ -420,4 +452,6 @@ export class Lexer {
|
|
if (token = this.tokenizer.br(src)) {
|
|
src = src.substring(token.raw.length);
|
|
+ // no need to reset (no more blockTokens anyways)
|
|
+ token.ln = this.ln++;
|
|
tokens.push(token);
|
|
continue;
|
|
@@ -462,4 +496,5 @@ export class Lexer {
|
|
if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
|
|
src = src.substring(token.raw.length);
|
|
+ this.ln = token.ln || this.ln;
|
|
if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started
|
|
prevChar = token.raw.slice(-1);
|
|
diff --git a/src/Parser.js b/src/Parser.js
|
|
--- a/src/Parser.js
|
|
+++ b/src/Parser.js
|
|
@@ -18,4 +18,5 @@ export class Parser {
|
|
this.textRenderer = new TextRenderer();
|
|
this.slugger = new Slugger();
|
|
+ this.ln = 0; // error indicator; should always be set >=1 from tokens
|
|
}
|
|
|
|
@@ -64,4 +65,8 @@ export class Parser {
|
|
for (i = 0; i < l; i++) {
|
|
token = tokens[i];
|
|
+ // take line-numbers from tokens whenever possible
|
|
+ // and update the renderer's html attribute with the new value
|
|
+ this.ln = token.ln || this.ln;
|
|
+ this.renderer.tag_ln(this.ln);
|
|
|
|
// Run any renderer extensions
|
|
@@ -124,7 +129,10 @@ export class Parser {
|
|
}
|
|
|
|
- body += this.renderer.tablerow(cell);
|
|
+ // the +2 is to skip the table header
|
|
+ body += this.renderer.tag_ln(token.ln + j + 2).tablerow(cell);
|
|
}
|
|
- out += this.renderer.table(header, body);
|
|
+ // the html attribute is now at the end of the table,
|
|
+ // reset it before writing the <table> tag now
|
|
+ out += this.renderer.tag_ln(token.ln).table(header, body);
|
|
continue;
|
|
}
|
|
@@ -167,8 +175,12 @@ export class Parser {
|
|
|
|
itemBody += this.parse(item.tokens, loose);
|
|
- body += this.renderer.listitem(itemBody, task, checked);
|
|
+ // similar to tables, writing contents before the <ul> tag
|
|
+ // so update the tag attribute as we go
|
|
+ // (assuming all list entries got tagged with a source-line, probably safe)
|
|
+ body += this.renderer.tag_ln((item.tokens[0] || token).ln).listitem(itemBody, task, checked);
|
|
}
|
|
|
|
- out += this.renderer.list(body, ordered, start);
|
|
+ // then reset to the <ul>'s correct line number and write it
|
|
+ out += this.renderer.tag_ln(token.ln).list(body, ordered, start);
|
|
continue;
|
|
}
|
|
@@ -179,5 +191,6 @@ export class Parser {
|
|
}
|
|
case 'paragraph': {
|
|
- out += this.renderer.paragraph(this.parseInline(token.tokens));
|
|
+ let t = this.parseInline(token.tokens);
|
|
+ out += this.renderer.tag_ln(token.ln).paragraph(t);
|
|
continue;
|
|
}
|
|
@@ -221,4 +234,7 @@ export class Parser {
|
|
token = tokens[i];
|
|
|
|
+ // another thing that only affects <br/> and other inlines
|
|
+ this.ln = token.ln || this.ln;
|
|
+
|
|
// Run any renderer extensions
|
|
if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
|
|
diff --git a/src/Renderer.js b/src/Renderer.js
|
|
--- a/src/Renderer.js
|
|
+++ b/src/Renderer.js
|
|
@@ -11,6 +11,12 @@ export class Renderer {
|
|
constructor(options) {
|
|
this.options = options || defaults;
|
|
+ this.ln = "";
|
|
}
|
|
|
|
+ tag_ln(n) {
|
|
+ this.ln = ' data-ln="' + n + '"';
|
|
+ return this;
|
|
+ };
|
|
+
|
|
code(code, infostring, escaped) {
|
|
const lang = (infostring || '').match(/\S*/)[0];
|
|
@@ -26,10 +32,10 @@ export class Renderer {
|
|
|
|
if (!lang) {
|
|
- return '<pre><code>'
|
|
+ return '<pre' + this.ln + '><code>'
|
|
+ (escaped ? code : escape(code, true))
|
|
+ '</code></pre>\n';
|
|
}
|
|
|
|
- return '<pre><code class="'
|
|
+ return '<pre' + this.ln + '><code class="'
|
|
+ this.options.langPrefix
|
|
+ escape(lang, true)
|
|
@@ -40,5 +46,5 @@ export class Renderer {
|
|
|
|
blockquote(quote) {
|
|
- return '<blockquote>\n' + quote + '</blockquote>\n';
|
|
+ return '<blockquote' + this.ln + '>\n' + quote + '</blockquote>\n';
|
|
}
|
|
|
|
@@ -51,4 +57,5 @@ export class Renderer {
|
|
return '<h'
|
|
+ level
|
|
+ + this.ln
|
|
+ ' id="'
|
|
+ this.options.headerPrefix
|
|
@@ -61,5 +68,5 @@ export class Renderer {
|
|
}
|
|
// ignore IDs
|
|
- return '<h' + level + '>' + text + '</h' + level + '>\n';
|
|
+ return '<h' + level + this.ln + '>' + text + '</h' + level + '>\n';
|
|
}
|
|
|
|
@@ -75,5 +82,5 @@ export class Renderer {
|
|
|
|
listitem(text) {
|
|
- return '<li>' + text + '</li>\n';
|
|
+ return '<li' + this.ln + '>' + text + '</li>\n';
|
|
}
|
|
|
|
@@ -87,5 +94,5 @@ export class Renderer {
|
|
|
|
paragraph(text) {
|
|
- return '<p>' + text + '</p>\n';
|
|
+ return '<p' + this.ln + '>' + text + '</p>\n';
|
|
}
|
|
|
|
@@ -102,5 +109,5 @@ export class Renderer {
|
|
|
|
tablerow(content) {
|
|
- return '<tr>\n' + content + '</tr>\n';
|
|
+ return '<tr' + this.ln + '>\n' + content + '</tr>\n';
|
|
}
|
|
|
|
@@ -127,5 +134,5 @@ export class Renderer {
|
|
|
|
br() {
|
|
- return this.options.xhtml ? '<br/>' : '<br>';
|
|
+ return this.options.xhtml ? '<br' + this.ln + '/>' : '<br' + this.ln + '>';
|
|
}
|
|
|
|
@@ -153,5 +160,5 @@ export class Renderer {
|
|
}
|
|
|
|
- let out = '<img src="' + href + '" alt="' + text + '"';
|
|
+ let out = '<img' + this.ln + ' src="' + href + '" alt="' + text + '"';
|
|
if (title) {
|
|
out += ' title="' + title + '"';
|
|
diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
|
--- a/src/Tokenizer.js
|
|
+++ b/src/Tokenizer.js
|
|
@@ -297,4 +297,7 @@ export class Tokenizer {
|
|
const l = list.items.length;
|
|
|
|
+ // each nested list gets +1 ahead; this hack makes every listgroup -1 but at least it doesn't get infinitely bad
|
|
+ this.lexer.ln--;
|
|
+
|
|
// Item child tokens handled here at end because we needed to have the final item to trim it first
|
|
for (i = 0; i < l; i++) {
|