diff --git a/packages/string-templates/package.json b/packages/string-templates/package.json
index 1f3e1b618a..340d74ef8a 100644
--- a/packages/string-templates/package.json
+++ b/packages/string-templates/package.json
@@ -13,8 +13,7 @@
},
"./package.json": "./package.json",
"./test/utils": "./test/utils.js",
- "./iife": "./src/iife.js",
- "./snippet": "./src/helpers/snippet.js"
+ "./iife": "./src/iife.js"
},
"files": [
"dist",
diff --git a/packages/string-templates/src/helpers/snippet.js b/packages/string-templates/src/helpers/snippet.js
deleted file mode 100644
index b7269b56cc..0000000000
--- a/packages/string-templates/src/helpers/snippet.js
+++ /dev/null
@@ -1,2 +0,0 @@
-module.exports.CrazyLongSnippet =
-  '/**\n * marked - a markdown parser\n * Copyright (c) 2011-2022, Christopher Jeffrey. (MIT Licensed)\n * https://github.com/markedjs/marked\n */\n … [single-line string literal containing the entire bundled marked.js source — defaults, escape/URL helpers, Tokenizer, block and inline grammars, Renderer, Parser, Slugger, and the marked()/marked.use/marked.walkTokens/marked.parseInline API surface — elided] … \nconst email = trigger.row\nreturn marked(email.Message)'