`):\n *\n * ```javascript\n * var hljs = require('highlight.js') // https://highlightjs.org/\n *\n * // Actual default values\n * var md = require('markdown-it')({\n * highlight: function (str, lang) {\n * if (lang && hljs.getLanguage(lang)) {\n * try {\n * return '' +\n * hljs.highlight(str, { language: lang, ignoreIllegals: true }).value +\n * '
</code></pre>';\n * } catch (__) {}\n * }\n *\n * return '<pre><code class="hljs">' + md.utils.escapeHtml(str) + '</code></pre>
';\n * }\n * });\n * ```\n *\n **/\n\n\nfunction MarkdownIt(presetName, options) {\n if (!(this instanceof MarkdownIt)) {\n return new MarkdownIt(presetName, options);\n }\n\n if (!options) {\n if (!utils.isString(presetName)) {\n options = presetName || {};\n presetName = 'default';\n }\n }\n /**\n * MarkdownIt#inline -> ParserInline\n *\n * Instance of [[ParserInline]]. You may need it to add new rules when\n * writing plugins. For simple rules control use [[MarkdownIt.disable]] and\n * [[MarkdownIt.enable]].\n **/\n\n\n this.inline = new ParserInline();\n /**\n * MarkdownIt#block -> ParserBlock\n *\n * Instance of [[ParserBlock]]. You may need it to add new rules when\n * writing plugins. For simple rules control use [[MarkdownIt.disable]] and\n * [[MarkdownIt.enable]].\n **/\n\n this.block = new ParserBlock();\n /**\n * MarkdownIt#core -> Core\n *\n * Instance of [[Core]] chain executor. You may need it to add new rules when\n * writing plugins. For simple rules control use [[MarkdownIt.disable]] and\n * [[MarkdownIt.enable]].\n **/\n\n this.core = new ParserCore();\n /**\n * MarkdownIt#renderer -> Renderer\n *\n * Instance of [[Renderer]]. Use it to modify output look. Or to add rendering\n * rules for new token types, generated by plugins.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * function myToken(tokens, idx, options, env, self) {\n * //...\n * return result;\n * };\n *\n * md.renderer.rules['my_token'] = myToken\n * ```\n *\n * See [[Renderer]] docs and [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js).\n **/\n\n this.renderer = new Renderer();\n /**\n * MarkdownIt#linkify -> LinkifyIt\n *\n * [linkify-it](https://github.com/markdown-it/linkify-it) instance.\n * Used by [linkify](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/linkify.js)\n * rule.\n **/\n\n this.linkify = new LinkifyIt();\n /**\n * MarkdownIt#validateLink(url) -> Boolean\n *\n * Link validation function. CommonMark allows too much in links. By default\n * we disable `javascript:`, `vbscript:`, `file:` schemas, and almost all `data:...` schemas\n * except some embedded image types.\n *\n * You can change this behaviour:\n *\n * ```javascript\n * var md = require('markdown-it')();\n * // enable everything\n * md.validateLink = function () { return true; }\n * ```\n **/\n\n this.validateLink = validateLink;\n /**\n * MarkdownIt#normalizeLink(url) -> String\n *\n * Function used to encode link url to a machine-readable format,\n * which includes url-encoding, punycode, etc.\n **/\n\n this.normalizeLink = normalizeLink;\n /**\n * MarkdownIt#normalizeLinkText(url) -> String\n *\n * Function used to decode link url to a human-readable format`\n **/\n\n this.normalizeLinkText = normalizeLinkText; // Expose utils & helpers for easy acces from plugins\n\n /**\n * MarkdownIt#utils -> utils\n *\n * Assorted utility functions, useful to write plugins. See details\n * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/common/utils.js).\n **/\n\n this.utils = utils;\n /**\n * MarkdownIt#helpers -> helpers\n *\n * Link components parser functions, useful to write plugins. 
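For example, a plugin's inline rule could reuse [[helpers.parseLinkLabel]] to find a matching `]` (a minimal, hypothetical sketch - `my_bracket_rule` is not part of markdown-it):\n *\n * ```javascript\n * md.inline.ruler.before('link', 'my_bracket_rule', function (state, silent) {\n * if (state.src.charCodeAt(state.pos) !== 0x5B) { return false; } // '['\n * var labelEnd = state.md.helpers.parseLinkLabel(state, state.pos);\n * if (labelEnd < 0) { return false; }\n * if (!silent) {\n * state.push('text', '', 0).content = state.src.slice(state.pos + 1, labelEnd);\n * }\n * state.pos = labelEnd + 1;\n * return true;\n * });\n * ```\n *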
See details\n * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/helpers).\n **/\n\n this.helpers = utils.assign({}, helpers);\n this.options = {};\n this.configure(presetName);\n\n if (options) {\n this.set(options);\n }\n}\n/** chainable\n * MarkdownIt.set(options)\n *\n * Set parser options (in the same format as in constructor). Probably, you\n * will never need it, but you can change options after constructor call.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')()\n * .set({ html: true, breaks: true })\n * .set({ typographer, true });\n * ```\n *\n * __Note:__ To achieve the best possible performance, don't modify a\n * `markdown-it` instance options on the fly. If you need multiple configurations\n * it's best to create multiple instances and initialize each with separate\n * config.\n **/\n\n\nMarkdownIt.prototype.set = function (options) {\n utils.assign(this.options, options);\n return this;\n};\n/** chainable, internal\n * MarkdownIt.configure(presets)\n *\n * Batch load of all options and compenent settings. This is internal method,\n * and you probably will not need it. But if you will - see available presets\n * and data structure [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)\n *\n * We strongly recommend to use presets instead of direct config loads. That\n * will give better compatibility with next versions.\n **/\n\n\nMarkdownIt.prototype.configure = function (presets) {\n var self = this,\n presetName;\n\n if (utils.isString(presets)) {\n presetName = presets;\n presets = config[presetName];\n\n if (!presets) {\n throw new Error('Wrong `markdown-it` preset \"' + presetName + '\", check name');\n }\n }\n\n if (!presets) {\n throw new Error('Wrong `markdown-it` preset, can\\'t be empty');\n }\n\n if (presets.options) {\n self.set(presets.options);\n }\n\n if (presets.components) {\n Object.keys(presets.components).forEach(function (name) {\n if (presets.components[name].rules) {\n self[name].ruler.enableOnly(presets.components[name].rules);\n }\n\n if (presets.components[name].rules2) {\n self[name].ruler2.enableOnly(presets.components[name].rules2);\n }\n });\n }\n\n return this;\n};\n/** chainable\n * MarkdownIt.enable(list, ignoreInvalid)\n * - list (String|Array): rule name or list of rule names to enable\n * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.\n *\n * Enable list or rules. It will automatically find appropriate components,\n * containing rules with given names. If rule not found, and `ignoreInvalid`\n * not set - throws exception.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')()\n * .enable(['sub', 'sup'])\n * .disable('smartquotes');\n * ```\n **/\n\n\nMarkdownIt.prototype.enable = function (list, ignoreInvalid) {\n var result = [];\n\n if (!Array.isArray(list)) {\n list = [list];\n }\n\n ['core', 'block', 'inline'].forEach(function (chain) {\n result = result.concat(this[chain].ruler.enable(list, true));\n }, this);\n result = result.concat(this.inline.ruler2.enable(list, true));\n var missed = list.filter(function (name) {\n return result.indexOf(name) < 0;\n });\n\n if (missed.length && !ignoreInvalid) {\n throw new Error('MarkdownIt. 
Failed to enable unknown rule(s): ' + missed);\n }\n\n return this;\n};\n/** chainable\n * MarkdownIt.disable(list, ignoreInvalid)\n * - list (String|Array): rule name or list of rule names to disable.\n * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.\n *\n * The same as [[MarkdownIt.enable]], but turn specified rules off.\n **/\n\n\nMarkdownIt.prototype.disable = function (list, ignoreInvalid) {\n var result = [];\n\n if (!Array.isArray(list)) {\n list = [list];\n }\n\n ['core', 'block', 'inline'].forEach(function (chain) {\n result = result.concat(this[chain].ruler.disable(list, true));\n }, this);\n result = result.concat(this.inline.ruler2.disable(list, true));\n var missed = list.filter(function (name) {\n return result.indexOf(name) < 0;\n });\n\n if (missed.length && !ignoreInvalid) {\n throw new Error('MarkdownIt. Failed to disable unknown rule(s): ' + missed);\n }\n\n return this;\n};\n/** chainable\n * MarkdownIt.use(plugin, params)\n *\n * Load specified plugin with given params into current parser instance.\n * It's just a sugar to call `plugin(md, params)` with curring.\n *\n * ##### Example\n *\n * ```javascript\n * var iterator = require('markdown-it-for-inline');\n * var md = require('markdown-it')()\n * .use(iterator, 'foo_replace', 'text', function (tokens, idx) {\n * tokens[idx].content = tokens[idx].content.replace(/foo/g, 'bar');\n * });\n * ```\n **/\n\n\nMarkdownIt.prototype.use = function (plugin\n/*, params, ... */\n) {\n var args = [this].concat(Array.prototype.slice.call(arguments, 1));\n plugin.apply(plugin, args);\n return this;\n};\n/** internal\n * MarkdownIt.parse(src, env) -> Array\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * Parse input string and return list of block tokens (special token type\n * \"inline\" will contain list of inline tokens). You should not call this\n * method directly, until you write custom renderer (for example, to produce\n * AST).\n *\n * `env` is used to pass data between \"distributed\" rules and return additional\n * metadata like reference info, needed for the renderer. It also can be used to\n * inject data in specific cases. Usually, you will be ok to pass `{}`,\n * and then pass updated object to renderer.\n **/\n\n\nMarkdownIt.prototype.parse = function (src, env) {\n if (typeof src !== 'string') {\n throw new Error('Input data should be a String');\n }\n\n var state = new this.core.State(src, this, env);\n this.core.process(state);\n return state.tokens;\n};\n/**\n * MarkdownIt.render(src [, env]) -> String\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * Render markdown string into html. It does all magic for you :).\n *\n * `env` can be used to inject additional metadata (`{}` by default).\n * But you will not need it with high probability. See also comment\n * in [[MarkdownIt.parse]].\n **/\n\n\nMarkdownIt.prototype.render = function (src, env) {\n env = env || {};\n return this.renderer.render(this.parse(src, env), this.options, env);\n};\n/** internal\n * MarkdownIt.parseInline(src, env) -> Array\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * The same as [[MarkdownIt.parse]] but skip all block rules. It returns the\n * block tokens list with the single `inline` element, containing parsed inline\n * tokens in `children` property. 
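A minimal sketch of the returned structure:\n *\n * ```javascript\n * var md = require('markdown-it')();\n * var tokens = md.parseInline('__hello__', {});\n * // tokens.length === 1, tokens[0].type === 'inline'\n * // tokens[0].children: strong_open, text ("hello"), strong_close\n * ```\n *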
Also updates `env` object.\n **/\n\n\nMarkdownIt.prototype.parseInline = function (src, env) {\n var state = new this.core.State(src, this, env);\n state.inlineMode = true;\n this.core.process(state);\n return state.tokens;\n};\n/**\n * MarkdownIt.renderInline(src [, env]) -> String\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * Similar to [[MarkdownIt.render]] but for single paragraph content. Result\n * will NOT be wrapped into `` tags.\n **/\n\n\nMarkdownIt.prototype.renderInline = function (src, env) {\n env = env || {};\n return this.renderer.render(this.parseInline(src, env), this.options, env);\n};\n\nmodule.exports = MarkdownIt;","'use strict';\n\nvar encodeCache = {}; // Create a lookup array where anything but characters in `chars` string\n// and alphanumeric chars is percent-encoded.\n//\n\nfunction getEncodeCache(exclude) {\n var i,\n ch,\n cache = encodeCache[exclude];\n\n if (cache) {\n return cache;\n }\n\n cache = encodeCache[exclude] = [];\n\n for (i = 0; i < 128; i++) {\n ch = String.fromCharCode(i);\n\n if (/^[0-9a-z]$/i.test(ch)) {\n // always allow unencoded alphanumeric characters\n cache.push(ch);\n } else {\n cache.push('%' + ('0' + i.toString(16).toUpperCase()).slice(-2));\n }\n }\n\n for (i = 0; i < exclude.length; i++) {\n cache[exclude.charCodeAt(i)] = exclude[i];\n }\n\n return cache;\n} // Encode unsafe characters with percent-encoding, skipping already\n// encoded sequences.\n//\n// - string - string to encode\n// - exclude - list of characters to ignore (in addition to a-zA-Z0-9)\n// - keepEscaped - don't encode '%' in a correct escape sequence (default: true)\n//\n\n\nfunction encode(string, exclude, keepEscaped) {\n var i,\n l,\n code,\n nextCode,\n cache,\n result = '';\n\n if (typeof exclude !== 'string') {\n // encode(string, keepEscaped)\n keepEscaped = exclude;\n exclude = encode.defaultChars;\n }\n\n if (typeof keepEscaped === 'undefined') {\n keepEscaped = true;\n }\n\n cache = getEncodeCache(exclude);\n\n for (i = 0, l = string.length; i < l; i++) {\n code = string.charCodeAt(i);\n\n if (keepEscaped && code === 0x25\n /* % */\n && i + 2 < l) {\n if (/^[0-9a-f]{2}$/i.test(string.slice(i + 1, i + 3))) {\n result += string.slice(i, i + 3);\n i += 2;\n continue;\n }\n }\n\n if (code < 128) {\n result += cache[code];\n continue;\n }\n\n if (code >= 0xD800 && code <= 0xDFFF) {\n if (code >= 0xD800 && code <= 0xDBFF && i + 1 < l) {\n nextCode = string.charCodeAt(i + 1);\n\n if (nextCode >= 0xDC00 && nextCode <= 0xDFFF) {\n result += encodeURIComponent(string[i] + string[i + 1]);\n i++;\n continue;\n }\n }\n\n result += '%EF%BF%BD';\n continue;\n }\n\n result += encodeURIComponent(string[i]);\n }\n\n return result;\n}\n\nencode.defaultChars = \";/?:@&=+$,-_.!~*'()#\";\nencode.componentChars = \"-_.!~*'()\";\nmodule.exports = encode;","'use strict';\n/* eslint-disable no-bitwise */\n\nvar decodeCache = {};\n\nfunction getDecodeCache(exclude) {\n var i,\n ch,\n cache = decodeCache[exclude];\n\n if (cache) {\n return cache;\n }\n\n cache = decodeCache[exclude] = [];\n\n for (i = 0; i < 128; i++) {\n ch = String.fromCharCode(i);\n cache.push(ch);\n }\n\n for (i = 0; i < exclude.length; i++) {\n ch = exclude.charCodeAt(i);\n cache[ch] = '%' + ('0' + ch.toString(16).toUpperCase()).slice(-2);\n }\n\n return cache;\n} // Decode percent-encoded string.\n//\n\n\nfunction decode(string, exclude) {\n var cache;\n\n if (typeof exclude !== 'string') {\n exclude = decode.defaultChars;\n }\n\n cache = getDecodeCache(exclude);\n return 
string.replace(/(%[a-f0-9]{2})+/gi, function (seq) {\n var i,\n l,\n b1,\n b2,\n b3,\n b4,\n chr,\n result = '';\n\n for (i = 0, l = seq.length; i < l; i += 3) {\n b1 = parseInt(seq.slice(i + 1, i + 3), 16);\n\n if (b1 < 0x80) {\n result += cache[b1];\n continue;\n }\n\n if ((b1 & 0xE0) === 0xC0 && i + 3 < l) {\n // 110xxxxx 10xxxxxx\n b2 = parseInt(seq.slice(i + 4, i + 6), 16);\n\n if ((b2 & 0xC0) === 0x80) {\n chr = b1 << 6 & 0x7C0 | b2 & 0x3F;\n\n if (chr < 0x80) {\n result += \"\\uFFFD\\uFFFD\";\n } else {\n result += String.fromCharCode(chr);\n }\n\n i += 3;\n continue;\n }\n }\n\n if ((b1 & 0xF0) === 0xE0 && i + 6 < l) {\n // 1110xxxx 10xxxxxx 10xxxxxx\n b2 = parseInt(seq.slice(i + 4, i + 6), 16);\n b3 = parseInt(seq.slice(i + 7, i + 9), 16);\n\n if ((b2 & 0xC0) === 0x80 && (b3 & 0xC0) === 0x80) {\n chr = b1 << 12 & 0xF000 | b2 << 6 & 0xFC0 | b3 & 0x3F;\n\n if (chr < 0x800 || chr >= 0xD800 && chr <= 0xDFFF) {\n result += \"\\uFFFD\\uFFFD\\uFFFD\";\n } else {\n result += String.fromCharCode(chr);\n }\n\n i += 6;\n continue;\n }\n }\n\n if ((b1 & 0xF8) === 0xF0 && i + 9 < l) {\n // 111110xx 10xxxxxx 10xxxxxx 10xxxxxx\n b2 = parseInt(seq.slice(i + 4, i + 6), 16);\n b3 = parseInt(seq.slice(i + 7, i + 9), 16);\n b4 = parseInt(seq.slice(i + 10, i + 12), 16);\n\n if ((b2 & 0xC0) === 0x80 && (b3 & 0xC0) === 0x80 && (b4 & 0xC0) === 0x80) {\n chr = b1 << 18 & 0x1C0000 | b2 << 12 & 0x3F000 | b3 << 6 & 0xFC0 | b4 & 0x3F;\n\n if (chr < 0x10000 || chr > 0x10FFFF) {\n result += \"\\uFFFD\\uFFFD\\uFFFD\\uFFFD\";\n } else {\n chr -= 0x10000;\n result += String.fromCharCode(0xD800 + (chr >> 10), 0xDC00 + (chr & 0x3FF));\n }\n\n i += 9;\n continue;\n }\n }\n\n result += \"\\uFFFD\";\n }\n\n return result;\n });\n}\n\ndecode.defaultChars = ';/?:@&=+$,#';\ndecode.componentChars = '';\nmodule.exports = decode;","'use strict';\n\nmodule.exports = function format(url) {\n var result = '';\n result += url.protocol || '';\n result += url.slashes ? '//' : '';\n result += url.auth ? url.auth + '@' : '';\n\n if (url.hostname && url.hostname.indexOf(':') !== -1) {\n // ipv6 address\n result += '[' + url.hostname + ']';\n } else {\n result += url.hostname || '';\n }\n\n result += url.port ? ':' + url.port : '';\n result += url.pathname || '';\n result += url.search || '';\n result += url.hash || '';\n return result;\n};","// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n'use strict'; //\n// Changes from joyent/node:\n//\n// 1. 
No leading slash in paths,\n// e.g. in `url.parse('http://foo?bar')` pathname is ``, not `/`\n//\n// 2. Backslashes are not replaced with slashes,\n// so `http:\\\\example.org\\` is treated like a relative path\n//\n// 3. Trailing colon is treated like a part of the path,\n// i.e. in `http://example.org:foo` pathname is `:foo`\n//\n// 4. Nothing is URL-encoded in the resulting object,\n// (in joyent/node some chars in auth and paths are encoded)\n//\n// 5. `url.parse()` does not have `parseQueryString` argument\n//\n// 6. Removed extraneous result properties: `host`, `path`, `query`, etc.,\n// which can be constructed using other parts of the url.\n//\n\nfunction Url() {\n this.protocol = null;\n this.slashes = null;\n this.auth = null;\n this.port = null;\n this.hostname = null;\n this.hash = null;\n this.search = null;\n this.pathname = null;\n} // Reference: RFC 3986, RFC 1808, RFC 2396\n// define these here so at least they only have to be\n// compiled once on the first module load.\n\n\nvar protocolPattern = /^([a-z0-9.+-]+:)/i,\n portPattern = /:[0-9]*$/,\n // Special case for a simple path URL\nsimplePathPattern = /^(\\/\\/?(?!\\/)[^\\?\\s]*)(\\?[^\\s]*)?$/,\n // RFC 2396: characters reserved for delimiting URLs.\n// We actually just auto-escape these.\ndelims = ['<', '>', '\"', '`', ' ', '\\r', '\\n', '\\t'],\n // RFC 2396: characters not allowed for various reasons.\nunwise = ['{', '}', '|', '\\\\', '^', '`'].concat(delims),\n // Allowed by RFCs, but cause of XSS attacks. Always escape these.\nautoEscape = ['\\''].concat(unwise),\n // Characters that are never ever allowed in a hostname.\n// Note that any invalid chars are also handled, but these\n// are the ones that are *expected* to be seen, so we fast-path\n// them.\nnonHostChars = ['%', '/', '?', ';', '#'].concat(autoEscape),\n hostEndingChars = ['/', '?', '#'],\n hostnameMaxLen = 255,\n hostnamePartPattern = /^[+a-z0-9A-Z_-]{0,63}$/,\n hostnamePartStart = /^([+a-z0-9A-Z_-]{0,63})(.*)$/,\n // protocols that can allow \"unsafe\" and \"unwise\" chars.\n\n/* eslint-disable no-script-url */\n// protocols that never have a hostname.\nhostlessProtocol = {\n 'javascript': true,\n 'javascript:': true\n},\n // protocols that always contain a // bit.\nslashedProtocol = {\n 'http': true,\n 'https': true,\n 'ftp': true,\n 'gopher': true,\n 'file': true,\n 'http:': true,\n 'https:': true,\n 'ftp:': true,\n 'gopher:': true,\n 'file:': true\n};\n/* eslint-enable no-script-url */\n\nfunction urlParse(url, slashesDenoteHost) {\n if (url && url instanceof Url) {\n return url;\n }\n\n var u = new Url();\n u.parse(url, slashesDenoteHost);\n return u;\n}\n\nUrl.prototype.parse = function (url, slashesDenoteHost) {\n var i,\n l,\n lowerProto,\n hec,\n slashes,\n rest = url; // trim before proceeding.\n // This is to support parse stuff like \" http://foo.com \\n\"\n\n rest = rest.trim();\n\n if (!slashesDenoteHost && url.split('#').length === 1) {\n // Try fast path regexp\n var simplePath = simplePathPattern.exec(rest);\n\n if (simplePath) {\n this.pathname = simplePath[1];\n\n if (simplePath[2]) {\n this.search = simplePath[2];\n }\n\n return this;\n }\n }\n\n var proto = protocolPattern.exec(rest);\n\n if (proto) {\n proto = proto[0];\n lowerProto = proto.toLowerCase();\n this.protocol = proto;\n rest = rest.substr(proto.length);\n } // figure out if it's got a host\n // user@server is *always* interpreted as a hostname, and url\n // resolution will treat //foo/bar as host=foo,path=bar because that's\n // how the browser resolves relative 
URLs.\n\n\n if (slashesDenoteHost || proto || rest.match(/^\\/\\/[^@\\/]+@[^@\\/]+/)) {\n slashes = rest.substr(0, 2) === '//';\n\n if (slashes && !(proto && hostlessProtocol[proto])) {\n rest = rest.substr(2);\n this.slashes = true;\n }\n }\n\n if (!hostlessProtocol[proto] && (slashes || proto && !slashedProtocol[proto])) {\n // there's a hostname.\n // the first instance of /, ?, ;, or # ends the host.\n //\n // If there is an @ in the hostname, then non-host chars *are* allowed\n // to the left of the last @ sign, unless some host-ending character\n // comes *before* the @-sign.\n // URLs are obnoxious.\n //\n // ex:\n // http://a@b@c/ => user:a@b host:c\n // http://a@b?@c => user:a host:c path:/?@c\n // v0.12 TODO(isaacs): This is not quite how Chrome does things.\n // Review our test case against browsers more comprehensively.\n // find the first instance of any hostEndingChars\n var hostEnd = -1;\n\n for (i = 0; i < hostEndingChars.length; i++) {\n hec = rest.indexOf(hostEndingChars[i]);\n\n if (hec !== -1 && (hostEnd === -1 || hec < hostEnd)) {\n hostEnd = hec;\n }\n } // at this point, either we have an explicit point where the\n // auth portion cannot go past, or the last @ char is the decider.\n\n\n var auth, atSign;\n\n if (hostEnd === -1) {\n // atSign can be anywhere.\n atSign = rest.lastIndexOf('@');\n } else {\n // atSign must be in auth portion.\n // http://a@b/c@d => host:b auth:a path:/c@d\n atSign = rest.lastIndexOf('@', hostEnd);\n } // Now we have a portion which is definitely the auth.\n // Pull that off.\n\n\n if (atSign !== -1) {\n auth = rest.slice(0, atSign);\n rest = rest.slice(atSign + 1);\n this.auth = auth;\n } // the host is the remaining to the left of the first non-host char\n\n\n hostEnd = -1;\n\n for (i = 0; i < nonHostChars.length; i++) {\n hec = rest.indexOf(nonHostChars[i]);\n\n if (hec !== -1 && (hostEnd === -1 || hec < hostEnd)) {\n hostEnd = hec;\n }\n } // if we still have not hit it, then the entire thing is a host.\n\n\n if (hostEnd === -1) {\n hostEnd = rest.length;\n }\n\n if (rest[hostEnd - 1] === ':') {\n hostEnd--;\n }\n\n var host = rest.slice(0, hostEnd);\n rest = rest.slice(hostEnd); // pull out port.\n\n this.parseHost(host); // we've indicated that there is a hostname,\n // so even if it's empty, it has to be present.\n\n this.hostname = this.hostname || ''; // if hostname begins with [ and ends with ]\n // assume that it's an IPv6 address.\n\n var ipv6Hostname = this.hostname[0] === '[' && this.hostname[this.hostname.length - 1] === ']'; // validate a little.\n\n if (!ipv6Hostname) {\n var hostparts = this.hostname.split(/\\./);\n\n for (i = 0, l = hostparts.length; i < l; i++) {\n var part = hostparts[i];\n\n if (!part) {\n continue;\n }\n\n if (!part.match(hostnamePartPattern)) {\n var newpart = '';\n\n for (var j = 0, k = part.length; j < k; j++) {\n if (part.charCodeAt(j) > 127) {\n // we replace non-ASCII char with a temporary placeholder\n // we need this to make sure size of hostname is not\n // broken by replacing non-ASCII by nothing\n newpart += 'x';\n } else {\n newpart += part[j];\n }\n } // we test again with ASCII char only\n\n\n if (!newpart.match(hostnamePartPattern)) {\n var validParts = hostparts.slice(0, i);\n var notHost = hostparts.slice(i + 1);\n var bit = part.match(hostnamePartStart);\n\n if (bit) {\n validParts.push(bit[1]);\n notHost.unshift(bit[2]);\n }\n\n if (notHost.length) {\n rest = notHost.join('.') + rest;\n }\n\n this.hostname = validParts.join('.');\n break;\n }\n }\n }\n }\n\n if 
(this.hostname.length > hostnameMaxLen) {\n this.hostname = '';\n } // strip [ and ] from the hostname\n // the host field still retains them, though\n\n\n if (ipv6Hostname) {\n this.hostname = this.hostname.substr(1, this.hostname.length - 2);\n }\n } // chop off from the tail first.\n\n\n var hash = rest.indexOf('#');\n\n if (hash !== -1) {\n // got a fragment string.\n this.hash = rest.substr(hash);\n rest = rest.slice(0, hash);\n }\n\n var qm = rest.indexOf('?');\n\n if (qm !== -1) {\n this.search = rest.substr(qm);\n rest = rest.slice(0, qm);\n }\n\n if (rest) {\n this.pathname = rest;\n }\n\n if (slashedProtocol[lowerProto] && this.hostname && !this.pathname) {\n this.pathname = '';\n }\n\n return this;\n};\n\nUrl.prototype.parseHost = function (host) {\n var port = portPattern.exec(host);\n\n if (port) {\n port = port[0];\n\n if (port !== ':') {\n this.port = port.substr(1);\n }\n\n host = host.substr(0, host.length - port.length);\n }\n\n if (host) {\n this.hostname = host;\n }\n};\n\nmodule.exports = urlParse;","module.exports = /[\\xAD\\u0600-\\u0605\\u061C\\u06DD\\u070F\\u08E2\\u180E\\u200B-\\u200F\\u202A-\\u202E\\u2060-\\u2064\\u2066-\\u206F\\uFEFF\\uFFF9-\\uFFFB]|\\uD804[\\uDCBD\\uDCCD]|\\uD82F[\\uDCA0-\\uDCA3]|\\uD834[\\uDD73-\\uDD7A]|\\uDB40[\\uDC01\\uDC20-\\uDC7F]/;","// Just a shortcut for bulk export\n'use strict';\n\nexports.parseLinkLabel = require('./parse_link_label');\nexports.parseLinkDestination = require('./parse_link_destination');\nexports.parseLinkTitle = require('./parse_link_title');","var defineProperty = require('../internals/object-define-property').f;\nvar has = require('../internals/has');\nvar wellKnownSymbol = require('../internals/well-known-symbol');\n\nvar TO_STRING_TAG = wellKnownSymbol('toStringTag');\n\nmodule.exports = function (it, TAG, STATIC) {\n if (it && !has(it = STATIC ? 
it : it.prototype, TO_STRING_TAG)) {\n defineProperty(it, TO_STRING_TAG, { configurable: true, value: TAG });\n }\n};\n","// Parse link label\n//\n// this function assumes that first character (\"[\") already matches;\n// returns the end of the label\n//\n'use strict';\n\nmodule.exports = function parseLinkLabel(state, start, disableNested) {\n var level,\n found,\n marker,\n prevPos,\n labelEnd = -1,\n max = state.posMax,\n oldPos = state.pos;\n state.pos = start + 1;\n level = 1;\n\n while (state.pos < max) {\n marker = state.src.charCodeAt(state.pos);\n\n if (marker === 0x5D\n /* ] */\n ) {\n level--;\n\n if (level === 0) {\n found = true;\n break;\n }\n }\n\n prevPos = state.pos;\n state.md.inline.skipToken(state);\n\n if (marker === 0x5B\n /* [ */\n ) {\n if (prevPos === state.pos - 1) {\n // increase level if we find text `[`, which is not a part of any token\n level++;\n } else if (disableNested) {\n state.pos = oldPos;\n return -1;\n }\n }\n }\n\n if (found) {\n labelEnd = state.pos;\n } // restore old state\n\n\n state.pos = oldPos;\n return labelEnd;\n};","// Parse link destination\n//\n'use strict';\n\nvar unescapeAll = require('../common/utils').unescapeAll;\n\nmodule.exports = function parseLinkDestination(str, start, max) {\n var code,\n level,\n pos = start,\n result = {\n ok: false,\n pos: 0,\n lines: 0,\n str: ''\n };\n\n if (str.charCodeAt(pos) === 0x3C\n /* < */\n ) {\n pos++;\n\n while (pos < max) {\n code = str.charCodeAt(pos);\n\n if (code === 0x0A\n /* \\n */\n ) {\n return result;\n }\n\n if (code === 0x3C\n /* < */\n ) {\n return result;\n }\n\n if (code === 0x3E\n /* > */\n ) {\n result.pos = pos + 1;\n result.str = unescapeAll(str.slice(start + 1, pos));\n result.ok = true;\n return result;\n }\n\n if (code === 0x5C\n /* \\ */\n && pos + 1 < max) {\n pos += 2;\n continue;\n }\n\n pos++;\n } // no closing '>'\n\n\n return result;\n } // this should be ... } else { ... 
branch\n\n\n level = 0;\n\n while (pos < max) {\n code = str.charCodeAt(pos);\n\n if (code === 0x20) {\n break;\n } // ascii control characters\n\n\n if (code < 0x20 || code === 0x7F) {\n break;\n }\n\n if (code === 0x5C\n /* \\ */\n && pos + 1 < max) {\n if (str.charCodeAt(pos + 1) === 0x20) {\n break;\n }\n\n pos += 2;\n continue;\n }\n\n if (code === 0x28\n /* ( */\n ) {\n level++;\n\n if (level > 32) {\n return result;\n }\n }\n\n if (code === 0x29\n /* ) */\n ) {\n if (level === 0) {\n break;\n }\n\n level--;\n }\n\n pos++;\n }\n\n if (start === pos) {\n return result;\n }\n\n if (level !== 0) {\n return result;\n }\n\n result.str = unescapeAll(str.slice(start, pos));\n result.pos = pos;\n result.ok = true;\n return result;\n};","// Parse link title\n//\n'use strict';\n\nvar unescapeAll = require('../common/utils').unescapeAll;\n\nmodule.exports = function parseLinkTitle(str, start, max) {\n var code,\n marker,\n lines = 0,\n pos = start,\n result = {\n ok: false,\n pos: 0,\n lines: 0,\n str: ''\n };\n\n if (pos >= max) {\n return result;\n }\n\n marker = str.charCodeAt(pos);\n\n if (marker !== 0x22\n /* \" */\n && marker !== 0x27\n /* ' */\n && marker !== 0x28\n /* ( */\n ) {\n return result;\n }\n\n pos++; // if opening marker is \"(\", switch it to closing marker \")\"\n\n if (marker === 0x28) {\n marker = 0x29;\n }\n\n while (pos < max) {\n code = str.charCodeAt(pos);\n\n if (code === marker) {\n result.pos = pos + 1;\n result.lines = lines;\n result.str = unescapeAll(str.slice(start + 1, pos));\n result.ok = true;\n return result;\n } else if (code === 0x28\n /* ( */\n && marker === 0x29\n /* ) */\n ) {\n return result;\n } else if (code === 0x0A) {\n lines++;\n } else if (code === 0x5C\n /* \\ */\n && pos + 1 < max) {\n pos++;\n\n if (str.charCodeAt(pos) === 0x0A) {\n lines++;\n }\n }\n\n pos++;\n }\n\n return result;\n};","/**\n * class Renderer\n *\n * Generates HTML from parsed token stream. Each instance has independent\n * copy of rules. Those can be rewritten with ease. Also, you can add new\n * rules if you create plugin and adds new token types.\n **/\n'use strict';\n\nvar assign = require('./common/utils').assign;\n\nvar unescapeAll = require('./common/utils').unescapeAll;\n\nvar escapeHtml = require('./common/utils').escapeHtml; ////////////////////////////////////////////////////////////////////////////////\n\n\nvar default_rules = {};\n\ndefault_rules.code_inline = function (tokens, idx, options, env, slf) {\n var token = tokens[idx];\n return '' + escapeHtml(token.content) + '
</code>';\n};\n\ndefault_rules.code_block = function (tokens, idx, options, env, slf) {\n var token = tokens[idx];\n return '<pre' + slf.renderAttrs(token) + '><code>' + escapeHtml(tokens[idx].content) + '</code></pre>
\\n';\n};\n\ndefault_rules.fence = function (tokens, idx, options, env, slf) {\n var token = tokens[idx],\n info = token.info ? unescapeAll(token.info).trim() : '',\n langName = '',\n langAttrs = '',\n highlighted,\n i,\n arr,\n tmpAttrs,\n tmpToken;\n\n if (info) {\n arr = info.split(/(\\s+)/g);\n langName = arr[0];\n langAttrs = arr.slice(2).join('');\n }\n\n if (options.highlight) {\n highlighted = options.highlight(token.content, langName, langAttrs) || escapeHtml(token.content);\n } else {\n highlighted = escapeHtml(token.content);\n }\n\n if (highlighted.indexOf('<pre') === 0) {\n return highlighted + '\\n';\n } // If language exists, inject class gently, without modifying original token.\n // May be, one day we will add .deepClone() for token and simplify this part, but\n // now we prefer to keep things rough for performance reasons.\n //\n\n\n if (info) {\n i = token.attrIndex('class');\n tmpAttrs = token.attrs ? token.attrs.slice() : [];\n\n if (i < 0) {\n tmpAttrs.push(['class', options.langPrefix + langName]);\n } else {\n tmpAttrs[i] = tmpAttrs[i].slice();\n tmpAttrs[i][1] += ' ' + options.langPrefix + langName;\n } // Fake token just to render attributes\n\n\n tmpToken = {\n attrs: tmpAttrs\n };\n return '<pre><code' + slf.renderAttrs(tmpToken) + '>' + highlighted + '</code></pre>\\n';\n }\n\n return '<pre><code' + slf.renderAttrs(token) + '>' + highlighted + '</code></pre>
\\n';\n};\n\ndefault_rules.image = function (tokens, idx, options, env, slf) {\n var token = tokens[idx]; // \"alt\" attr MUST be set, even if empty. Because it's mandatory and\n // should be placed on proper position for tests.\n //\n // Replace content with actual value\n\n token.attrs[token.attrIndex('alt')][1] = slf.renderInlineAsText(token.children, options, env);\n return slf.renderToken(tokens, idx, options);\n};\n\ndefault_rules.hardbreak = function (tokens, idx, options\n/*, env */\n) {\n return options.xhtmlOut ? '
<br />\\n' : '<br>\\n';\n};\n\ndefault_rules.softbreak = function (tokens, idx, options\n/*, env */\n) {\n return options.breaks ? options.xhtmlOut ? '<br />\\n' : '<br>
\\n' : '\\n';\n};\n\ndefault_rules.text = function (tokens, idx\n/*, options, env */\n) {\n return escapeHtml(tokens[idx].content);\n};\n\ndefault_rules.html_block = function (tokens, idx\n/*, options, env */\n) {\n return tokens[idx].content;\n};\n\ndefault_rules.html_inline = function (tokens, idx\n/*, options, env */\n) {\n return tokens[idx].content;\n};\n/**\n * new Renderer()\n *\n * Creates new [[Renderer]] instance and fill [[Renderer#rules]] with defaults.\n **/\n\n\nfunction Renderer() {\n /**\n * Renderer#rules -> Object\n *\n * Contains render rules for tokens. Can be updated and extended.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * md.renderer.rules.strong_open = function () { return ''; };\n * md.renderer.rules.strong_close = function () { return ''; };\n *\n * var result = md.renderInline(...);\n * ```\n *\n * Each rule is called as independent static function with fixed signature:\n *\n * ```javascript\n * function my_token_render(tokens, idx, options, env, renderer) {\n * // ...\n * return renderedHTML;\n * }\n * ```\n *\n * See [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js)\n * for more details and examples.\n **/\n this.rules = assign({}, default_rules);\n}\n/**\n * Renderer.renderAttrs(token) -> String\n *\n * Render token attributes to string.\n **/\n\n\nRenderer.prototype.renderAttrs = function renderAttrs(token) {\n var i, l, result;\n\n if (!token.attrs) {\n return '';\n }\n\n result = '';\n\n for (i = 0, l = token.attrs.length; i < l; i++) {\n result += ' ' + escapeHtml(token.attrs[i][0]) + '=\"' + escapeHtml(token.attrs[i][1]) + '\"';\n }\n\n return result;\n};\n/**\n * Renderer.renderToken(tokens, idx, options) -> String\n * - tokens (Array): list of tokens\n * - idx (Numbed): token index to render\n * - options (Object): params of parser instance\n *\n * Default token renderer. Can be overriden by custom function\n * in [[Renderer#rules]].\n **/\n\n\nRenderer.prototype.renderToken = function renderToken(tokens, idx, options) {\n var nextToken,\n result = '',\n needLf = false,\n token = tokens[idx]; // Tight list paragraphs\n\n if (token.hidden) {\n return '';\n } // Insert a newline between hidden paragraph and subsequent opening\n // block-level tag.\n //\n // For example, here we should insert a newline before blockquote:\n // - a\n // >\n //\n\n\n if (token.block && token.nesting !== -1 && idx && tokens[idx - 1].hidden) {\n result += '\\n';\n } // Add token name, e.g. `
`.\n //\n needLf = false;\n }\n }\n }\n }\n\n result += needLf ? '>\\n' : '>';\n return result;\n};\n/**\n * Renderer.renderInline(tokens, options, env) -> String\n * - tokens (Array): list on block tokens to render\n * - options (Object): params of parser instance\n * - env (Object): additional data from parsed input (references, for example)\n *\n * The same as [[Renderer.render]], but for single token of `inline` type.\n **/\n\n\nRenderer.prototype.renderInline = function (tokens, options, env) {\n var type,\n result = '',\n rules = this.rules;\n\n for (var i = 0, len = tokens.length; i < len; i++) {\n type = tokens[i].type;\n\n if (typeof rules[type] !== 'undefined') {\n result += rules[type](tokens, i, options, env, this);\n } else {\n result += this.renderToken(tokens, i, options);\n }\n }\n\n return result;\n};\n/** internal\n * Renderer.renderInlineAsText(tokens, options, env) -> String\n * - tokens (Array): list on block tokens to render\n * - options (Object): params of parser instance\n * - env (Object): additional data from parsed input (references, for example)\n *\n * Special kludge for image `alt` attributes to conform CommonMark spec.\n * Don't try to use it! Spec requires to show `alt` content with stripped markup,\n * instead of simple escaping.\n **/\n\n\nRenderer.prototype.renderInlineAsText = function (tokens, options, env) {\n var result = '';\n\n for (var i = 0, len = tokens.length; i < len; i++) {\n if (tokens[i].type === 'text') {\n result += tokens[i].content;\n } else if (tokens[i].type === 'image') {\n result += this.renderInlineAsText(tokens[i].children, options, env);\n } else if (tokens[i].type === 'softbreak') {\n result += '\\n';\n }\n }\n\n return result;\n};\n/**\n * Renderer.render(tokens, options, env) -> String\n * - tokens (Array): list on block tokens to render\n * - options (Object): params of parser instance\n * - env (Object): additional data from parsed input (references, for example)\n *\n * Takes token stream and generates HTML. Probably, you will never need to call\n * this method directly.\n **/\n\n\nRenderer.prototype.render = function (tokens, options, env) {\n var i,\n len,\n type,\n result = '',\n rules = this.rules;\n\n for (i = 0, len = tokens.length; i < len; i++) {\n type = tokens[i].type;\n\n if (type === 'inline') {\n result += this.renderInline(tokens[i].children, options, env);\n } else if (typeof rules[type] !== 'undefined') {\n result += rules[type](tokens, i, options, env, this);\n } else {\n result += this.renderToken(tokens, i, options, env);\n }\n }\n\n return result;\n};\n\nmodule.exports = Renderer;","/** internal\n * class Core\n *\n * Top-level rules executor. Glues block/inline parsers and does intermediate\n * transformations.\n **/\n'use strict';\n\nvar Ruler = require('./ruler');\n\nvar _rules = [['normalize', require('./rules_core/normalize')], ['block', require('./rules_core/block')], ['inline', require('./rules_core/inline')], ['linkify', require('./rules_core/linkify')], ['replacements', require('./rules_core/replacements')], ['smartquotes', require('./rules_core/smartquotes')], // `text_join` finds `text_special` tokens (for escape sequences)\n// and joins them with the rest of the text\n['text_join', require('./rules_core/text_join')]];\n/**\n * new Core()\n **/\n\nfunction Core() {\n /**\n * Core#ruler -> Ruler\n *\n * [[Ruler]] instance. 
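For example, a plugin could append its own core rule here (a hedged sketch - `my_core_rule` is not a built-in rule):\n *\n * ```javascript\n * md.core.ruler.push('my_core_rule', function (state) {\n * // state.tokens holds the block token stream produced so far\n * state.env.tokenCount = state.tokens.length;\n * });\n * ```\n *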
Keep configuration of core rules.\n **/\n this.ruler = new Ruler();\n\n for (var i = 0; i < _rules.length; i++) {\n this.ruler.push(_rules[i][0], _rules[i][1]);\n }\n}\n/**\n * Core.process(state)\n *\n * Executes core chain rules.\n **/\n\n\nCore.prototype.process = function (state) {\n var i, l, rules;\n rules = this.ruler.getRules('');\n\n for (i = 0, l = rules.length; i < l; i++) {\n rules[i](state);\n }\n};\n\nCore.prototype.State = require('./rules_core/state_core');\nmodule.exports = Core;","// Normalize input string\n'use strict'; // https://spec.commonmark.org/0.29/#line-ending\n\nvar NEWLINES_RE = /\\r\\n?|\\n/g;\nvar NULL_RE = /\\0/g;\n\nmodule.exports = function normalize(state) {\n var str; // Normalize newlines\n\n str = state.src.replace(NEWLINES_RE, '\\n'); // Replace NULL characters\n\n str = str.replace(NULL_RE, \"\\uFFFD\");\n state.src = str;\n};","'use strict';\n\nmodule.exports = function block(state) {\n var token;\n\n if (state.inlineMode) {\n token = new state.Token('inline', '', 0);\n token.content = state.src;\n token.map = [0, 1];\n token.children = [];\n state.tokens.push(token);\n } else {\n state.md.block.parse(state.src, state.md, state.env, state.tokens);\n }\n};","'use strict';\n\nmodule.exports = function inline(state) {\n var tokens = state.tokens,\n tok,\n i,\n l; // Parse inlines\n\n for (i = 0, l = tokens.length; i < l; i++) {\n tok = tokens[i];\n\n if (tok.type === 'inline') {\n state.md.inline.parse(tok.content, state.md, state.env, tok.children);\n }\n }\n};","// Replace link-like texts with link nodes.\n//\n// Currently restricted by `md.validateLink()` to http/https/ftp\n//\n'use strict';\n\nvar arrayReplaceAt = require('../common/utils').arrayReplaceAt;\n\nfunction isLinkOpen(str) {\n return /^\\s]/i.test(str);\n}\n\nfunction isLinkClose(str) {\n return /^<\\/a\\s*>/i.test(str);\n}\n\nmodule.exports = function linkify(state) {\n var i,\n j,\n l,\n tokens,\n token,\n currentToken,\n nodes,\n ln,\n text,\n pos,\n lastPos,\n level,\n htmlLinkLevel,\n url,\n fullUrl,\n urlText,\n blockTokens = state.tokens,\n links;\n\n if (!state.md.options.linkify) {\n return;\n }\n\n for (j = 0, l = blockTokens.length; j < l; j++) {\n if (blockTokens[j].type !== 'inline' || !state.md.linkify.pretest(blockTokens[j].content)) {\n continue;\n }\n\n tokens = blockTokens[j].children;\n htmlLinkLevel = 0; // We scan from the end, to keep position when new tags added.\n // Use reversed logic in links start/end match\n\n for (i = tokens.length - 1; i >= 0; i--) {\n currentToken = tokens[i]; // Skip content of markdown links\n\n if (currentToken.type === 'link_close') {\n i--;\n\n while (tokens[i].level !== currentToken.level && tokens[i].type !== 'link_open') {\n i--;\n }\n\n continue;\n } // Skip content of html tag links\n\n\n if (currentToken.type === 'html_inline') {\n if (isLinkOpen(currentToken.content) && htmlLinkLevel > 0) {\n htmlLinkLevel--;\n }\n\n if (isLinkClose(currentToken.content)) {\n htmlLinkLevel++;\n }\n }\n\n if (htmlLinkLevel > 0) {\n continue;\n }\n\n if (currentToken.type === 'text' && state.md.linkify.test(currentToken.content)) {\n text = currentToken.content;\n links = state.md.linkify.match(text); // Now split string to nodes\n\n nodes = [];\n level = currentToken.level;\n lastPos = 0; // forbid escape sequence at the start of the string,\n // this avoids http\\://example.com/ from being linkified as\n // http://example.com/\n\n if (links.length > 0 && links[0].index === 0 && i > 0 && tokens[i - 1].type === 'text_special') {\n links = 
links.slice(1);\n }\n\n for (ln = 0; ln < links.length; ln++) {\n url = links[ln].url;\n fullUrl = state.md.normalizeLink(url);\n\n if (!state.md.validateLink(fullUrl)) {\n continue;\n }\n\n urlText = links[ln].text; // Linkifier might send raw hostnames like \"example.com\", where url\n // starts with domain name. So we prepend http:// in those cases,\n // and remove it afterwards.\n //\n\n if (!links[ln].schema) {\n urlText = state.md.normalizeLinkText('http://' + urlText).replace(/^http:\\/\\//, '');\n } else if (links[ln].schema === 'mailto:' && !/^mailto:/i.test(urlText)) {\n urlText = state.md.normalizeLinkText('mailto:' + urlText).replace(/^mailto:/, '');\n } else {\n urlText = state.md.normalizeLinkText(urlText);\n }\n\n pos = links[ln].index;\n\n if (pos > lastPos) {\n token = new state.Token('text', '', 0);\n token.content = text.slice(lastPos, pos);\n token.level = level;\n nodes.push(token);\n }\n\n token = new state.Token('link_open', 'a', 1);\n token.attrs = [['href', fullUrl]];\n token.level = level++;\n token.markup = 'linkify';\n token.info = 'auto';\n nodes.push(token);\n token = new state.Token('text', '', 0);\n token.content = urlText;\n token.level = level;\n nodes.push(token);\n token = new state.Token('link_close', 'a', -1);\n token.level = --level;\n token.markup = 'linkify';\n token.info = 'auto';\n nodes.push(token);\n lastPos = links[ln].lastIndex;\n }\n\n if (lastPos < text.length) {\n token = new state.Token('text', '', 0);\n token.content = text.slice(lastPos);\n token.level = level;\n nodes.push(token);\n } // replace current node\n\n\n blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);\n }\n }\n }\n};","// Simple typographic replacements\n//\n// (c) (C) → ©\n// (tm) (TM) → ™\n// (r) (R) → ®\n// +- → ±\n// ... → … (also ?.... → ?.., !.... → !..)\n// ???????? → ???, !!!!! → !!!, `,,` → `,`\n// -- → –, --- → —\n//\n'use strict'; // TODO:\n// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾\n// - multiplications 2 x 4 -> 2 × 4\n\nvar RARE_RE = /\\+-|\\.\\.|\\?\\?\\?\\?|!!!!|,,|--/; // Workaround for phantomjs - need regex without /g flag,\n// or root check will fail every second time\n\nvar SCOPED_ABBR_TEST_RE = /\\((c|tm|r)\\)/i;\nvar SCOPED_ABBR_RE = /\\((c|tm|r)\\)/ig;\nvar SCOPED_ABBR = {\n c: '©',\n r: '®',\n tm: '™'\n};\n\nfunction replaceFn(match, name) {\n return SCOPED_ABBR[name.toLowerCase()];\n}\n\nfunction replace_scoped(inlineTokens) {\n var i,\n token,\n inside_autolink = 0;\n\n for (i = inlineTokens.length - 1; i >= 0; i--) {\n token = inlineTokens[i];\n\n if (token.type === 'text' && !inside_autolink) {\n token.content = token.content.replace(SCOPED_ABBR_RE, replaceFn);\n }\n\n if (token.type === 'link_open' && token.info === 'auto') {\n inside_autolink--;\n }\n\n if (token.type === 'link_close' && token.info === 'auto') {\n inside_autolink++;\n }\n }\n}\n\nfunction replace_rare(inlineTokens) {\n var i,\n token,\n inside_autolink = 0;\n\n for (i = inlineTokens.length - 1; i >= 0; i--) {\n token = inlineTokens[i];\n\n if (token.type === 'text' && !inside_autolink) {\n if (RARE_RE.test(token.content)) {\n token.content = token.content.replace(/\\+-/g, '±') // .., ..., ....... -> …\n // but ?..... & !..... -> ?.. 
& !..\n .replace(/\\.{2,}/g, '…').replace(/([?!])…/g, '$1..').replace(/([?!]){4,}/g, '$1$1$1').replace(/,{2,}/g, ',') // em-dash\n .replace(/(^|[^-])---(?=[^-]|$)/mg, \"$1\\u2014\") // en-dash\n .replace(/(^|\\s)--(?=\\s|$)/mg, \"$1\\u2013\").replace(/(^|[^-\\s])--(?=[^-\\s]|$)/mg, \"$1\\u2013\");\n }\n }\n\n if (token.type === 'link_open' && token.info === 'auto') {\n inside_autolink--;\n }\n\n if (token.type === 'link_close' && token.info === 'auto') {\n inside_autolink++;\n }\n }\n}\n\nmodule.exports = function replace(state) {\n var blkIdx;\n\n if (!state.md.options.typographer) {\n return;\n }\n\n for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {\n if (state.tokens[blkIdx].type !== 'inline') {\n continue;\n }\n\n if (SCOPED_ABBR_TEST_RE.test(state.tokens[blkIdx].content)) {\n replace_scoped(state.tokens[blkIdx].children);\n }\n\n if (RARE_RE.test(state.tokens[blkIdx].content)) {\n replace_rare(state.tokens[blkIdx].children);\n }\n }\n};","var ceil = Math.ceil;\nvar floor = Math.floor;\n\n// `ToInteger` abstract operation\n// https://tc39.es/ecma262/#sec-tointeger\nmodule.exports = function (argument) {\n return isNaN(argument = +argument) ? 0 : (argument > 0 ? floor : ceil)(argument);\n};\n","// Convert straight quotation marks to typographic ones\n//\n'use strict';\n\nvar isWhiteSpace = require('../common/utils').isWhiteSpace;\n\nvar isPunctChar = require('../common/utils').isPunctChar;\n\nvar isMdAsciiPunct = require('../common/utils').isMdAsciiPunct;\n\nvar QUOTE_TEST_RE = /['\"]/;\nvar QUOTE_RE = /['\"]/g;\nvar APOSTROPHE = \"\\u2019\";\n/* ’ */\n\nfunction replaceAt(str, index, ch) {\n return str.slice(0, index) + ch + str.slice(index + 1);\n}\n\nfunction process_inlines(tokens, state) {\n var i, token, text, t, pos, max, thisLevel, item, lastChar, nextChar, isLastPunctChar, isNextPunctChar, isLastWhiteSpace, isNextWhiteSpace, canOpen, canClose, j, isSingle, stack, openQuote, closeQuote;\n stack = [];\n\n for (i = 0; i < tokens.length; i++) {\n token = tokens[i];\n thisLevel = tokens[i].level;\n\n for (j = stack.length - 1; j >= 0; j--) {\n if (stack[j].level <= thisLevel) {\n break;\n }\n }\n\n stack.length = j + 1;\n\n if (token.type !== 'text') {\n continue;\n }\n\n text = token.content;\n pos = 0;\n max = text.length;\n /*eslint no-labels:0,block-scoped-var:0*/\n\n OUTER: while (pos < max) {\n QUOTE_RE.lastIndex = pos;\n t = QUOTE_RE.exec(text);\n\n if (!t) {\n break;\n }\n\n canOpen = canClose = true;\n pos = t.index + 1;\n isSingle = t[0] === \"'\"; // Find previous character,\n // default to space if it's the beginning of the line\n //\n\n lastChar = 0x20;\n\n if (t.index - 1 >= 0) {\n lastChar = text.charCodeAt(t.index - 1);\n } else {\n for (j = i - 1; j >= 0; j--) {\n if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // lastChar defaults to 0x20\n\n if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'\n\n lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1);\n break;\n }\n } // Find next character,\n // default to space if it's the end of the line\n //\n\n\n nextChar = 0x20;\n\n if (pos < max) {\n nextChar = text.charCodeAt(pos);\n } else {\n for (j = i + 1; j < tokens.length; j++) {\n if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // nextChar defaults to 0x20\n\n if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'\n\n nextChar = tokens[j].content.charCodeAt(0);\n break;\n }\n 
}\n\n isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));\n isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));\n isLastWhiteSpace = isWhiteSpace(lastChar);\n isNextWhiteSpace = isWhiteSpace(nextChar);\n\n if (isNextWhiteSpace) {\n canOpen = false;\n } else if (isNextPunctChar) {\n if (!(isLastWhiteSpace || isLastPunctChar)) {\n canOpen = false;\n }\n }\n\n if (isLastWhiteSpace) {\n canClose = false;\n } else if (isLastPunctChar) {\n if (!(isNextWhiteSpace || isNextPunctChar)) {\n canClose = false;\n }\n }\n\n if (nextChar === 0x22\n /* \" */\n && t[0] === '\"') {\n if (lastChar >= 0x30\n /* 0 */\n && lastChar <= 0x39\n /* 9 */\n ) {\n // special case: 1\"\" - count first quote as an inch\n canClose = canOpen = false;\n }\n }\n\n if (canOpen && canClose) {\n // Replace quotes in the middle of punctuation sequence, but not\n // in the middle of the words, i.e.:\n //\n // 1. foo \" bar \" baz - not replaced\n // 2. foo-\"-bar-\"-baz - replaced\n // 3. foo\"bar\"baz - not replaced\n //\n canOpen = isLastPunctChar;\n canClose = isNextPunctChar;\n }\n\n if (!canOpen && !canClose) {\n // middle of word\n if (isSingle) {\n token.content = replaceAt(token.content, t.index, APOSTROPHE);\n }\n\n continue;\n }\n\n if (canClose) {\n // this could be a closing quote, rewind the stack to get a match\n for (j = stack.length - 1; j >= 0; j--) {\n item = stack[j];\n\n if (stack[j].level < thisLevel) {\n break;\n }\n\n if (item.single === isSingle && stack[j].level === thisLevel) {\n item = stack[j];\n\n if (isSingle) {\n openQuote = state.md.options.quotes[2];\n closeQuote = state.md.options.quotes[3];\n } else {\n openQuote = state.md.options.quotes[0];\n closeQuote = state.md.options.quotes[1];\n } // replace token.content *before* tokens[item.token].content,\n // because, if they are pointing at the same token, replaceAt\n // could mess up indices when quote length != 1\n\n\n token.content = replaceAt(token.content, t.index, closeQuote);\n tokens[item.token].content = replaceAt(tokens[item.token].content, item.pos, openQuote);\n pos += closeQuote.length - 1;\n\n if (item.token === i) {\n pos += openQuote.length - 1;\n }\n\n text = token.content;\n max = text.length;\n stack.length = j;\n continue OUTER;\n }\n }\n }\n\n if (canOpen) {\n stack.push({\n token: i,\n pos: t.index,\n single: isSingle,\n level: thisLevel\n });\n } else if (canClose && isSingle) {\n token.content = replaceAt(token.content, t.index, APOSTROPHE);\n }\n }\n }\n}\n\nmodule.exports = function smartquotes(state) {\n /*eslint max-depth:0*/\n var blkIdx;\n\n if (!state.md.options.typographer) {\n return;\n }\n\n for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {\n if (state.tokens[blkIdx].type !== 'inline' || !QUOTE_TEST_RE.test(state.tokens[blkIdx].content)) {\n continue;\n }\n\n process_inlines(state.tokens[blkIdx].children, state);\n }\n};","// Join raw text tokens with the rest of the text\n//\n// This is set as a separate rule to provide an opportunity for plugins\n// to run text replacements after text join, but before escape join.\n//\n// For example, `\\:)` shouldn't be replaced with an emoji.\n//\n'use strict';\n\nmodule.exports = function text_join(state) {\n var j,\n l,\n tokens,\n curr,\n max,\n last,\n blockTokens = state.tokens;\n\n for (j = 0, l = blockTokens.length; j < l; j++) {\n if (blockTokens[j].type !== 'inline') continue;\n tokens = blockTokens[j].children;\n max = tokens.length;\n\n for (curr = 0; curr < max; curr++) {\n if 
(tokens[curr].type === 'text_special') {\n tokens[curr].type = 'text';\n }\n }\n\n for (curr = last = 0; curr < max; curr++) {\n if (tokens[curr].type === 'text' && curr + 1 < max && tokens[curr + 1].type === 'text') {\n // collapse two adjacent text nodes\n tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content;\n } else {\n if (curr !== last) {\n tokens[last] = tokens[curr];\n }\n\n last++;\n }\n }\n\n if (curr !== last) {\n tokens.length = last;\n }\n }\n};","// Core state object\n//\n'use strict';\n\nvar Token = require('../token');\n\nfunction StateCore(src, md, env) {\n this.src = src;\n this.env = env;\n this.tokens = [];\n this.inlineMode = false;\n this.md = md; // link to parser instance\n} // re-export Token class to use in core rules\n\n\nStateCore.prototype.Token = Token;\nmodule.exports = StateCore;","/** internal\n * class ParserBlock\n *\n * Block-level tokenizer.\n **/\n'use strict';\n\nvar Ruler = require('./ruler');\n\nvar _rules = [// First 2 params - rule name & source. Secondary array - list of rules,\n// which can be terminated by this one.\n['table', require('./rules_block/table'), ['paragraph', 'reference']], ['code', require('./rules_block/code')], ['fence', require('./rules_block/fence'), ['paragraph', 'reference', 'blockquote', 'list']], ['blockquote', require('./rules_block/blockquote'), ['paragraph', 'reference', 'blockquote', 'list']], ['hr', require('./rules_block/hr'), ['paragraph', 'reference', 'blockquote', 'list']], ['list', require('./rules_block/list'), ['paragraph', 'reference', 'blockquote']], ['reference', require('./rules_block/reference')], ['html_block', require('./rules_block/html_block'), ['paragraph', 'reference', 'blockquote']], ['heading', require('./rules_block/heading'), ['paragraph', 'reference', 'blockquote']], ['lheading', require('./rules_block/lheading')], ['paragraph', require('./rules_block/paragraph')]];\n/**\n * new ParserBlock()\n **/\n\nfunction ParserBlock() {\n /**\n * ParserBlock#ruler -> Ruler\n *\n * [[Ruler]] instance. Keep configuration of block rules.\n **/\n this.ruler = new Ruler();\n\n for (var i = 0; i < _rules.length; i++) {\n this.ruler.push(_rules[i][0], _rules[i][1], {\n alt: (_rules[i][2] || []).slice()\n });\n }\n} // Generate tokens for input range\n//\n\n\nParserBlock.prototype.tokenize = function (state, startLine, endLine) {\n var ok,\n i,\n prevLine,\n rules = this.ruler.getRules(''),\n len = rules.length,\n line = startLine,\n hasEmptyLines = false,\n maxNesting = state.md.options.maxNesting;\n\n while (line < endLine) {\n state.line = line = state.skipEmptyLines(line);\n\n if (line >= endLine) {\n break;\n } // Termination condition for nested calls.\n // Nested calls currently used for blockquotes & lists\n\n\n if (state.sCount[line] < state.blkIndent) {\n break;\n } // If nesting level exceeded - skip tail to the end. 
That's not ordinary\n // situation and we should not care about content.\n\n\n if (state.level >= maxNesting) {\n state.line = endLine;\n break;\n } // Try all possible rules.\n // On success, rule should:\n //\n // - update `state.line`\n // - update `state.tokens`\n // - return true\n\n\n prevLine = state.line;\n\n for (i = 0; i < len; i++) {\n ok = rules[i](state, line, endLine, false);\n\n if (ok) {\n if (prevLine >= state.line) {\n throw new Error(\"block rule didn't increment state.line\");\n }\n\n break;\n }\n } // this can only happen if user disables paragraph rule\n\n\n if (!ok) throw new Error('none of the block rules matched'); // set state.tight if we had an empty line before current tag\n // i.e. latest empty line should not count\n\n state.tight = !hasEmptyLines; // paragraph might \"eat\" one newline after it in nested lists\n\n if (state.isEmpty(state.line - 1)) {\n hasEmptyLines = true;\n }\n\n line = state.line;\n\n if (line < endLine && state.isEmpty(line)) {\n hasEmptyLines = true;\n line++;\n state.line = line;\n }\n }\n};\n/**\n * ParserBlock.parse(str, md, env, outTokens)\n *\n * Process input string and push block tokens into `outTokens`\n **/\n\n\nParserBlock.prototype.parse = function (src, md, env, outTokens) {\n var state;\n\n if (!src) {\n return;\n }\n\n state = new this.State(src, md, env, outTokens);\n this.tokenize(state, state.line, state.lineMax);\n};\n\nParserBlock.prototype.State = require('./rules_block/state_block');\nmodule.exports = ParserBlock;","// GFM table, https://github.github.com/gfm/#tables-extension-\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\nfunction getLine(state, line) {\n var pos = state.bMarks[line] + state.tShift[line],\n max = state.eMarks[line];\n return state.src.slice(pos, max);\n}\n\nfunction escapedSplit(str) {\n var result = [],\n pos = 0,\n max = str.length,\n ch,\n isEscaped = false,\n lastPos = 0,\n current = '';\n ch = str.charCodeAt(pos);\n\n while (pos < max) {\n if (ch === 0x7c\n /* | */\n ) {\n if (!isEscaped) {\n // pipe separating cells, '|'\n result.push(current + str.substring(lastPos, pos));\n current = '';\n lastPos = pos + 1;\n } else {\n // escaped pipe, '\\|'\n current += str.substring(lastPos, pos - 1);\n lastPos = pos;\n }\n }\n\n isEscaped = ch === 0x5c\n /* \\ */\n ;\n pos++;\n ch = str.charCodeAt(pos);\n }\n\n result.push(current + str.substring(lastPos));\n return result;\n}\n\nmodule.exports = function table(state, startLine, endLine, silent) {\n var ch, lineText, pos, i, l, nextLine, columns, columnCount, token, aligns, t, tableLines, tbodyLines, oldParentType, terminate, terminatorRules, firstCh, secondCh; // should have at least two lines\n\n if (startLine + 2 > endLine) {\n return false;\n }\n\n nextLine = startLine + 1;\n\n if (state.sCount[nextLine] < state.blkIndent) {\n return false;\n } // if it's indented more than 3 spaces, it should be a code block\n\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n return false;\n } // first character of the second line should be '|', '-', ':',\n // and no other characters are allowed but spaces;\n // basically, this is the equivalent of /^[-:|][-:|\\s]*$/ regexp\n\n\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n\n if (pos >= state.eMarks[nextLine]) {\n return false;\n }\n\n firstCh = state.src.charCodeAt(pos++);\n\n if (firstCh !== 0x7C\n /* | */\n && firstCh !== 0x2D\n /* - */\n && firstCh !== 0x3A\n /* : */\n ) {\n return false;\n }\n\n if (pos >= state.eMarks[nextLine]) {\n return false;\n }\n\n secondCh 
= state.src.charCodeAt(pos++);\n\n if (secondCh !== 0x7C\n /* | */\n && secondCh !== 0x2D\n /* - */\n && secondCh !== 0x3A\n /* : */\n && !isSpace(secondCh)) {\n return false;\n } // if first character is '-', then second character must not be a space\n // (due to parsing ambiguity with list)\n\n\n if (firstCh === 0x2D\n /* - */\n && isSpace(secondCh)) {\n return false;\n }\n\n while (pos < state.eMarks[nextLine]) {\n ch = state.src.charCodeAt(pos);\n\n if (ch !== 0x7C\n /* | */\n && ch !== 0x2D\n /* - */\n && ch !== 0x3A\n /* : */\n && !isSpace(ch)) {\n return false;\n }\n\n pos++;\n }\n\n lineText = getLine(state, startLine + 1);\n columns = lineText.split('|');\n aligns = [];\n\n for (i = 0; i < columns.length; i++) {\n t = columns[i].trim();\n\n if (!t) {\n // allow empty columns before and after table, but not in between columns;\n // e.g. allow ` |---| `, disallow ` ---||--- `\n if (i === 0 || i === columns.length - 1) {\n continue;\n } else {\n return false;\n }\n }\n\n if (!/^:?-+:?$/.test(t)) {\n return false;\n }\n\n if (t.charCodeAt(t.length - 1) === 0x3A\n /* : */\n ) {\n aligns.push(t.charCodeAt(0) === 0x3A\n /* : */\n ? 'center' : 'right');\n } else if (t.charCodeAt(0) === 0x3A\n /* : */\n ) {\n aligns.push('left');\n } else {\n aligns.push('');\n }\n }\n\n lineText = getLine(state, startLine).trim();\n\n if (lineText.indexOf('|') === -1) {\n return false;\n }\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n }\n\n columns = escapedSplit(lineText);\n if (columns.length && columns[0] === '') columns.shift();\n if (columns.length && columns[columns.length - 1] === '') columns.pop(); // header row will define an amount of columns in the entire table,\n // and align row should be exactly the same (the rest of the rows can differ)\n\n columnCount = columns.length;\n\n if (columnCount === 0 || columnCount !== aligns.length) {\n return false;\n }\n\n if (silent) {\n return true;\n }\n\n oldParentType = state.parentType;\n state.parentType = 'table'; // use 'blockquote' lists for termination because it's\n // the most similar to tables\n\n terminatorRules = state.md.block.ruler.getRules('blockquote');\n token = state.push('table_open', 'table', 1);\n token.map = tableLines = [startLine, 0];\n token = state.push('thead_open', 'thead', 1);\n token.map = [startLine, startLine + 1];\n token = state.push('tr_open', 'tr', 1);\n token.map = [startLine, startLine + 1];\n\n for (i = 0; i < columns.length; i++) {\n token = state.push('th_open', 'th', 1);\n\n if (aligns[i]) {\n token.attrs = [['style', 'text-align:' + aligns[i]]];\n }\n\n token = state.push('inline', '', 0);\n token.content = columns[i].trim();\n token.children = [];\n token = state.push('th_close', 'th', -1);\n }\n\n token = state.push('tr_close', 'tr', -1);\n token = state.push('thead_close', 'thead', -1);\n\n for (nextLine = startLine + 2; nextLine < endLine; nextLine++) {\n if (state.sCount[nextLine] < state.blkIndent) {\n break;\n }\n\n terminate = false;\n\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) {\n break;\n }\n\n lineText = getLine(state, nextLine).trim();\n\n if (!lineText) {\n break;\n }\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n break;\n }\n\n columns = escapedSplit(lineText);\n if (columns.length && columns[0] === '') columns.shift();\n if (columns.length && columns[columns.length - 1] === '') columns.pop();\n\n if (nextLine === startLine + 2) 
{\n token = state.push('tbody_open', 'tbody', 1);\n token.map = tbodyLines = [startLine + 2, 0];\n }\n\n token = state.push('tr_open', 'tr', 1);\n token.map = [nextLine, nextLine + 1];\n\n for (i = 0; i < columnCount; i++) {\n token = state.push('td_open', 'td', 1);\n\n if (aligns[i]) {\n token.attrs = [['style', 'text-align:' + aligns[i]]];\n }\n\n token = state.push('inline', '', 0);\n token.content = columns[i] ? columns[i].trim() : '';\n token.children = [];\n token = state.push('td_close', 'td', -1);\n }\n\n token = state.push('tr_close', 'tr', -1);\n }\n\n if (tbodyLines) {\n token = state.push('tbody_close', 'tbody', -1);\n tbodyLines[1] = nextLine;\n }\n\n token = state.push('table_close', 'table', -1);\n tableLines[1] = nextLine;\n state.parentType = oldParentType;\n state.line = nextLine;\n return true;\n};","// Code block (4 spaces padded)\n'use strict';\n\nmodule.exports = function code(state, startLine, endLine\n/*, silent*/\n) {\n var nextLine, last, token;\n\n if (state.sCount[startLine] - state.blkIndent < 4) {\n return false;\n }\n\n last = nextLine = startLine + 1;\n\n while (nextLine < endLine) {\n if (state.isEmpty(nextLine)) {\n nextLine++;\n continue;\n }\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n nextLine++;\n last = nextLine;\n continue;\n }\n\n break;\n }\n\n state.line = last;\n token = state.push('code_block', 'code', 0);\n token.content = state.getLines(startLine, last, 4 + state.blkIndent, false) + '\\n';\n token.map = [startLine, state.line];\n return true;\n};","// fences (``` lang, ~~~ lang)\n'use strict';\n\nmodule.exports = function fence(state, startLine, endLine, silent) {\n var marker,\n len,\n params,\n nextLine,\n mem,\n token,\n markup,\n haveEndMarker = false,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine]; // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n }\n\n if (pos + 3 > max) {\n return false;\n }\n\n marker = state.src.charCodeAt(pos);\n\n if (marker !== 0x7E\n /* ~ */\n && marker !== 0x60\n /* ` */\n ) {\n return false;\n } // scan marker length\n\n\n mem = pos;\n pos = state.skipChars(pos, marker);\n len = pos - mem;\n\n if (len < 3) {\n return false;\n }\n\n markup = state.src.slice(mem, pos);\n params = state.src.slice(pos, max);\n\n if (marker === 0x60\n /* ` */\n ) {\n if (params.indexOf(String.fromCharCode(marker)) >= 0) {\n return false;\n }\n } // Since start is found, we can report success here in validation mode\n\n\n if (silent) {\n return true;\n } // search end of block\n\n\n nextLine = startLine;\n\n for (;;) {\n nextLine++;\n\n if (nextLine >= endLine) {\n // unclosed block should be autoclosed by end of document.\n // also block seems to be autoclosed by end of parent\n break;\n }\n\n pos = mem = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n\n if (pos < max && state.sCount[nextLine] < state.blkIndent) {\n // non-empty line with negative indent should stop the list:\n // - ```\n // test\n break;\n }\n\n if (state.src.charCodeAt(pos) !== marker) {\n continue;\n }\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n // closing fence should be indented less than 4 spaces\n continue;\n }\n\n pos = state.skipChars(pos, marker); // closing code fence must be at least as long as the opening one\n\n if (pos - mem < len) {\n continue;\n } // make sure tail has spaces only\n\n\n pos = state.skipSpaces(pos);\n\n if (pos < max) {\n continue;\n 
}\n\n haveEndMarker = true; // found!\n\n break;\n } // If a fence has heading spaces, they should be removed from its inner block\n\n\n len = state.sCount[startLine];\n state.line = nextLine + (haveEndMarker ? 1 : 0);\n token = state.push('fence', 'code', 0);\n token.info = params;\n token.content = state.getLines(startLine + 1, nextLine, len, true);\n token.markup = markup;\n token.map = [startLine, state.line];\n return true;\n};","// Block quotes\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\nmodule.exports = function blockquote(state, startLine, endLine, silent) {\n var adjustTab,\n ch,\n i,\n initial,\n l,\n lastLineEmpty,\n lines,\n nextLine,\n offset,\n oldBMarks,\n oldBSCount,\n oldIndent,\n oldParentType,\n oldSCount,\n oldTShift,\n spaceAfterMarker,\n terminate,\n terminatorRules,\n token,\n isOutdented,\n oldLineMax = state.lineMax,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine]; // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n } // check the block quote marker\n\n\n if (state.src.charCodeAt(pos) !== 0x3E\n /* > */\n ) {\n return false;\n } // we know that it's going to be a valid blockquote,\n // so no point trying to find the end of it in silent mode\n\n\n if (silent) {\n return true;\n }\n\n oldBMarks = [];\n oldBSCount = [];\n oldSCount = [];\n oldTShift = [];\n terminatorRules = state.md.block.ruler.getRules('blockquote');\n oldParentType = state.parentType;\n state.parentType = 'blockquote'; // Search the end of the block\n //\n // Block ends with either:\n // 1. an empty line outside:\n // ```\n // > test\n //\n // ```\n // 2. an empty line inside:\n // ```\n // >\n // test\n // ```\n // 3. another tag:\n // ```\n // > test\n // - - -\n // ```\n\n for (nextLine = startLine; nextLine < endLine; nextLine++) {\n // check if it's outdented, i.e. it's inside list item and indented\n // less than said list item:\n //\n // ```\n // 1. anything\n // > current blockquote\n // 2. checking this line\n // ```\n isOutdented = state.sCount[nextLine] < state.blkIndent;\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n\n if (pos >= max) {\n // Case 1: line is not inside the blockquote, and this line is empty.\n break;\n }\n\n if (state.src.charCodeAt(pos++) === 0x3E\n /* > */\n && !isOutdented) {\n // This line is inside the blockquote.\n // set offset past spaces and \">\"\n initial = state.sCount[nextLine] + 1; // skip one optional space after '>'\n\n if (state.src.charCodeAt(pos) === 0x20\n /* space */\n ) {\n // ' > test '\n // ^ -- position start of line here:\n pos++;\n initial++;\n adjustTab = false;\n spaceAfterMarker = true;\n } else if (state.src.charCodeAt(pos) === 0x09\n /* tab */\n ) {\n spaceAfterMarker = true;\n\n if ((state.bsCount[nextLine] + initial) % 4 === 3) {\n // ' >\\t test '\n // ^ -- position start of line here (tab has width===1)\n pos++;\n initial++;\n adjustTab = false;\n } else {\n // ' >\\t test '\n // ^ -- position start of line here + shift bsCount slightly\n // to make extra space appear\n adjustTab = true;\n }\n } else {\n spaceAfterMarker = false;\n }\n\n offset = initial;\n oldBMarks.push(state.bMarks[nextLine]);\n state.bMarks[nextLine] = pos;\n\n while (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (isSpace(ch)) {\n if (ch === 0x09) {\n offset += 4 - (offset + state.bsCount[nextLine] + (adjustTab ? 
1 : 0)) % 4;\n } else {\n offset++;\n }\n } else {\n break;\n }\n\n pos++;\n }\n\n lastLineEmpty = pos >= max;\n oldBSCount.push(state.bsCount[nextLine]);\n state.bsCount[nextLine] = state.sCount[nextLine] + 1 + (spaceAfterMarker ? 1 : 0);\n oldSCount.push(state.sCount[nextLine]);\n state.sCount[nextLine] = offset - initial;\n oldTShift.push(state.tShift[nextLine]);\n state.tShift[nextLine] = pos - state.bMarks[nextLine];\n continue;\n } // Case 2: line is not inside the blockquote, and the last line was empty.\n\n\n if (lastLineEmpty) {\n break;\n } // Case 3: another tag found.\n\n\n terminate = false;\n\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) {\n // Quirk to enforce \"hard termination mode\" for paragraphs;\n // normally if you call `tokenize(state, startLine, nextLine)`,\n // paragraphs will look below nextLine for paragraph continuation,\n // but if blockquote is terminated by another tag, they shouldn't\n state.lineMax = nextLine;\n\n if (state.blkIndent !== 0) {\n // state.blkIndent was non-zero, we now set it to zero,\n // so we need to re-calculate all offsets to appear as\n // if indent wasn't changed\n oldBMarks.push(state.bMarks[nextLine]);\n oldBSCount.push(state.bsCount[nextLine]);\n oldTShift.push(state.tShift[nextLine]);\n oldSCount.push(state.sCount[nextLine]);\n state.sCount[nextLine] -= state.blkIndent;\n }\n\n break;\n }\n\n oldBMarks.push(state.bMarks[nextLine]);\n oldBSCount.push(state.bsCount[nextLine]);\n oldTShift.push(state.tShift[nextLine]);\n oldSCount.push(state.sCount[nextLine]); // A negative indentation means that this is a paragraph continuation\n //\n\n state.sCount[nextLine] = -1;\n }\n\n oldIndent = state.blkIndent;\n state.blkIndent = 0;\n token = state.push('blockquote_open', 'blockquote', 1);\n token.markup = '>';\n token.map = lines = [startLine, 0];\n state.md.block.tokenize(state, startLine, nextLine);\n token = state.push('blockquote_close', 'blockquote', -1);\n token.markup = '>';\n state.lineMax = oldLineMax;\n state.parentType = oldParentType;\n lines[1] = state.line; // Restore original tShift; this might not be necessary since the parser\n // has already been here, but just to make sure we can do that.\n\n for (i = 0; i < oldTShift.length; i++) {\n state.bMarks[i + startLine] = oldBMarks[i];\n state.tShift[i + startLine] = oldTShift[i];\n state.sCount[i + startLine] = oldSCount[i];\n state.bsCount[i + startLine] = oldBSCount[i];\n }\n\n state.blkIndent = oldIndent;\n return true;\n};","// Horizontal rule\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\nmodule.exports = function hr(state, startLine, endLine, silent) {\n var marker,\n cnt,\n ch,\n token,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine]; // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n }\n\n marker = state.src.charCodeAt(pos++); // Check hr marker\n\n if (marker !== 0x2A\n /* * */\n && marker !== 0x2D\n /* - */\n && marker !== 0x5F\n /* _ */\n ) {\n return false;\n } // markers can be mixed with spaces, but there should be at least 3 of them\n\n\n cnt = 1;\n\n while (pos < max) {\n ch = state.src.charCodeAt(pos++);\n\n if (ch !== marker && !isSpace(ch)) {\n return false;\n }\n\n if (ch === marker) {\n cnt++;\n }\n }\n\n if (cnt < 3) {\n return false;\n }\n\n if (silent) {\n return true;\n }\n\n 
state.line = startLine + 1;\n token = state.push('hr', 'hr', 0);\n token.map = [startLine, state.line];\n token.markup = Array(cnt + 1).join(String.fromCharCode(marker));\n return true;\n};","// Lists\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace; // Search `[-+*][\\n ]`, returns next pos after marker on success\n// or -1 on fail.\n\n\nfunction skipBulletListMarker(state, startLine) {\n var marker, pos, max, ch;\n pos = state.bMarks[startLine] + state.tShift[startLine];\n max = state.eMarks[startLine];\n marker = state.src.charCodeAt(pos++); // Check bullet\n\n if (marker !== 0x2A\n /* * */\n && marker !== 0x2D\n /* - */\n && marker !== 0x2B\n /* + */\n ) {\n return -1;\n }\n\n if (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (!isSpace(ch)) {\n // \" -test \" - is not a list item\n return -1;\n }\n }\n\n return pos;\n} // Search `\\d+[.)][\\n ]`, returns next pos after marker on success\n// or -1 on fail.\n\n\nfunction skipOrderedListMarker(state, startLine) {\n var ch,\n start = state.bMarks[startLine] + state.tShift[startLine],\n pos = start,\n max = state.eMarks[startLine]; // List marker should have at least 2 chars (digit + dot)\n\n if (pos + 1 >= max) {\n return -1;\n }\n\n ch = state.src.charCodeAt(pos++);\n\n if (ch < 0x30\n /* 0 */\n || ch > 0x39\n /* 9 */\n ) {\n return -1;\n }\n\n for (;;) {\n // EOL -> fail\n if (pos >= max) {\n return -1;\n }\n\n ch = state.src.charCodeAt(pos++);\n\n if (ch >= 0x30\n /* 0 */\n && ch <= 0x39\n /* 9 */\n ) {\n // List marker should have no more than 9 digits\n // (prevents integer overflow in browsers)\n if (pos - start >= 10) {\n return -1;\n }\n\n continue;\n } // found valid marker\n\n\n if (ch === 0x29\n /* ) */\n || ch === 0x2e\n /* . */\n ) {\n break;\n }\n\n return -1;\n }\n\n if (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (!isSpace(ch)) {\n // \" 1.test \" - is not a list item\n return -1;\n }\n }\n\n return pos;\n}\n\nfunction markTightParagraphs(state, idx) {\n var i,\n l,\n level = state.level + 2;\n\n for (i = idx + 2, l = state.tokens.length - 2; i < l; i++) {\n if (state.tokens[i].level === level && state.tokens[i].type === 'paragraph_open') {\n state.tokens[i + 2].hidden = true;\n state.tokens[i].hidden = true;\n i += 2;\n }\n }\n}\n\nmodule.exports = function list(state, startLine, endLine, silent) {\n var ch,\n contentStart,\n i,\n indent,\n indentAfterMarker,\n initial,\n isOrdered,\n itemLines,\n l,\n listLines,\n listTokIdx,\n markerCharCode,\n markerValue,\n max,\n offset,\n oldListIndent,\n oldParentType,\n oldSCount,\n oldTShift,\n oldTight,\n pos,\n posAfterMarker,\n prevEmptyEnd,\n start,\n terminate,\n terminatorRules,\n token,\n nextLine = startLine,\n isTerminatingParagraph = false,\n tight = true; // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n return false;\n } // Special case:\n // - item 1\n // - item 2\n // - item 3\n // - item 4\n // - this one is a paragraph continuation\n\n\n if (state.listIndent >= 0 && state.sCount[nextLine] - state.listIndent >= 4 && state.sCount[nextLine] < state.blkIndent) {\n return false;\n } // limit conditions when list can interrupt\n // a paragraph (validation mode only)\n\n\n if (silent && state.parentType === 'paragraph') {\n // Next list item should still terminate previous list item;\n //\n // This code can fail if plugins use blkIndent as well as lists,\n // but I hope the spec gets fixed long before that happens.\n //\n if (state.sCount[nextLine] >= 
state.blkIndent) {\n isTerminatingParagraph = true;\n }\n } // Detect list type and position after marker\n\n\n if ((posAfterMarker = skipOrderedListMarker(state, nextLine)) >= 0) {\n isOrdered = true;\n start = state.bMarks[nextLine] + state.tShift[nextLine];\n markerValue = Number(state.src.slice(start, posAfterMarker - 1)); // If we're starting a new ordered list right after\n // a paragraph, it should start with 1.\n\n if (isTerminatingParagraph && markerValue !== 1) return false;\n } else if ((posAfterMarker = skipBulletListMarker(state, nextLine)) >= 0) {\n isOrdered = false;\n } else {\n return false;\n } // If we're starting a new unordered list right after\n // a paragraph, first line should not be empty.\n\n\n if (isTerminatingParagraph) {\n if (state.skipSpaces(posAfterMarker) >= state.eMarks[nextLine]) return false;\n } // For validation mode we can terminate immediately\n\n\n if (silent) {\n return true;\n } // We should terminate list on style change. Remember first one to compare.\n\n\n markerCharCode = state.src.charCodeAt(posAfterMarker - 1); // Start list\n\n listTokIdx = state.tokens.length;\n\n if (isOrdered) {\n token = state.push('ordered_list_open', 'ol', 1);\n\n if (markerValue !== 1) {\n token.attrs = [['start', markerValue]];\n }\n } else {\n token = state.push('bullet_list_open', 'ul', 1);\n }\n\n token.map = listLines = [nextLine, 0];\n token.markup = String.fromCharCode(markerCharCode); //\n // Iterate list items\n //\n\n prevEmptyEnd = false;\n terminatorRules = state.md.block.ruler.getRules('list');\n oldParentType = state.parentType;\n state.parentType = 'list';\n\n while (nextLine < endLine) {\n pos = posAfterMarker;\n max = state.eMarks[nextLine];\n initial = offset = state.sCount[nextLine] + posAfterMarker - (state.bMarks[nextLine] + state.tShift[nextLine]);\n\n while (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (ch === 0x09) {\n offset += 4 - (offset + state.bsCount[nextLine]) % 4;\n } else if (ch === 0x20) {\n offset++;\n } else {\n break;\n }\n\n pos++;\n }\n\n contentStart = pos;\n\n if (contentStart >= max) {\n // trimming space in \"- \\n 3\" case, indent is 1 here\n indentAfterMarker = 1;\n } else {\n indentAfterMarker = offset - initial;\n } // If we have more than 4 spaces, the indent is 1\n // (the rest is just indented code block)\n\n\n if (indentAfterMarker > 4) {\n indentAfterMarker = 1;\n } // \" - test\"\n // ^^^^^ - calculating total length of this thing\n\n\n indent = initial + indentAfterMarker; // Run subparser & write tokens\n\n token = state.push('list_item_open', 'li', 1);\n token.markup = String.fromCharCode(markerCharCode);\n token.map = itemLines = [nextLine, 0];\n\n if (isOrdered) {\n token.info = state.src.slice(start, posAfterMarker - 1);\n } // change current state, then restore it after parser subcall\n\n\n oldTight = state.tight;\n oldTShift = state.tShift[nextLine];\n oldSCount = state.sCount[nextLine]; // - example list\n // ^ listIndent position will be here\n // ^ blkIndent position will be here\n //\n\n oldListIndent = state.listIndent;\n state.listIndent = state.blkIndent;\n state.blkIndent = indent;\n state.tight = true;\n state.tShift[nextLine] = contentStart - state.bMarks[nextLine];\n state.sCount[nextLine] = offset;\n\n if (contentStart >= max && state.isEmpty(nextLine + 1)) {\n // workaround for this case\n // (list item is empty, list terminates before \"foo\"):\n // ~~~~~~~~\n // -\n //\n // foo\n // ~~~~~~~~\n state.line = Math.min(state.line + 2, endLine);\n } else {\n state.md.block.tokenize(state, 
nextLine, endLine, true);\n } // If any of list item is tight, mark list as tight\n\n\n if (!state.tight || prevEmptyEnd) {\n tight = false;\n } // Item become loose if finish with empty line,\n // but we should filter last element, because it means list finish\n\n\n prevEmptyEnd = state.line - nextLine > 1 && state.isEmpty(state.line - 1);\n state.blkIndent = state.listIndent;\n state.listIndent = oldListIndent;\n state.tShift[nextLine] = oldTShift;\n state.sCount[nextLine] = oldSCount;\n state.tight = oldTight;\n token = state.push('list_item_close', 'li', -1);\n token.markup = String.fromCharCode(markerCharCode);\n nextLine = state.line;\n itemLines[1] = nextLine;\n\n if (nextLine >= endLine) {\n break;\n } //\n // Try to check if list is terminated or continued.\n //\n\n\n if (state.sCount[nextLine] < state.blkIndent) {\n break;\n } // if it's indented more than 3 spaces, it should be a code block\n\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n break;\n } // fail if terminating block found\n\n\n terminate = false;\n\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) {\n break;\n } // fail if list has another type\n\n\n if (isOrdered) {\n posAfterMarker = skipOrderedListMarker(state, nextLine);\n\n if (posAfterMarker < 0) {\n break;\n }\n\n start = state.bMarks[nextLine] + state.tShift[nextLine];\n } else {\n posAfterMarker = skipBulletListMarker(state, nextLine);\n\n if (posAfterMarker < 0) {\n break;\n }\n }\n\n if (markerCharCode !== state.src.charCodeAt(posAfterMarker - 1)) {\n break;\n }\n } // Finalize list\n\n\n if (isOrdered) {\n token = state.push('ordered_list_close', 'ol', -1);\n } else {\n token = state.push('bullet_list_close', 'ul', -1);\n }\n\n token.markup = String.fromCharCode(markerCharCode);\n listLines[1] = nextLine;\n state.line = nextLine;\n state.parentType = oldParentType; // mark paragraphs tight if needed\n\n if (tight) {\n markTightParagraphs(state, listTokIdx);\n }\n\n return true;\n};","'use strict';\n\nvar normalizeReference = require('../common/utils').normalizeReference;\n\nvar isSpace = require('../common/utils').isSpace;\n\nmodule.exports = function reference(state, startLine, _endLine, silent) {\n var ch,\n destEndPos,\n destEndLineNo,\n endLine,\n href,\n i,\n l,\n label,\n labelEnd,\n oldParentType,\n res,\n start,\n str,\n terminate,\n terminatorRules,\n title,\n lines = 0,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine],\n nextLine = startLine + 1; // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n }\n\n if (state.src.charCodeAt(pos) !== 0x5B\n /* [ */\n ) {\n return false;\n } // Simple check to quickly interrupt scan on [link](url) at the start of line.\n // Can be useful on practice: https://github.com/markdown-it/markdown-it/issues/54\n\n\n while (++pos < max) {\n if (state.src.charCodeAt(pos) === 0x5D\n /* ] */\n && state.src.charCodeAt(pos - 1) !== 0x5C\n /* \\ */\n ) {\n if (pos + 1 === max) {\n return false;\n }\n\n if (state.src.charCodeAt(pos + 1) !== 0x3A\n /* : */\n ) {\n return false;\n }\n\n break;\n }\n }\n\n endLine = state.lineMax; // jump line-by-line until empty one or EOF\n\n terminatorRules = state.md.block.ruler.getRules('reference');\n oldParentType = state.parentType;\n state.parentType = 'reference';\n\n for (; nextLine < endLine && !state.isEmpty(nextLine); 
nextLine++) {\n // this would be a code block normally, but after paragraph\n // it's considered a lazy continuation regardless of what's there\n if (state.sCount[nextLine] - state.blkIndent > 3) {\n continue;\n } // quirk for blockquotes, this line should already be checked by that rule\n\n\n if (state.sCount[nextLine] < 0) {\n continue;\n } // Some tags can terminate paragraph without empty line.\n\n\n terminate = false;\n\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) {\n break;\n }\n }\n\n str = state.getLines(startLine, nextLine, state.blkIndent, false).trim();\n max = str.length;\n\n for (pos = 1; pos < max; pos++) {\n ch = str.charCodeAt(pos);\n\n if (ch === 0x5B\n /* [ */\n ) {\n return false;\n } else if (ch === 0x5D\n /* ] */\n ) {\n labelEnd = pos;\n break;\n } else if (ch === 0x0A\n /* \\n */\n ) {\n lines++;\n } else if (ch === 0x5C\n /* \\ */\n ) {\n pos++;\n\n if (pos < max && str.charCodeAt(pos) === 0x0A) {\n lines++;\n }\n }\n }\n\n if (labelEnd < 0 || str.charCodeAt(labelEnd + 1) !== 0x3A\n /* : */\n ) {\n return false;\n } // [label]: destination 'title'\n // ^^^ skip optional whitespace here\n\n\n for (pos = labelEnd + 2; pos < max; pos++) {\n ch = str.charCodeAt(pos);\n\n if (ch === 0x0A) {\n lines++;\n } else if (isSpace(ch)) {\n /*eslint no-empty:0*/\n } else {\n break;\n }\n } // [label]: destination 'title'\n // ^^^^^^^^^^^ parse this\n\n\n res = state.md.helpers.parseLinkDestination(str, pos, max);\n\n if (!res.ok) {\n return false;\n }\n\n href = state.md.normalizeLink(res.str);\n\n if (!state.md.validateLink(href)) {\n return false;\n }\n\n pos = res.pos;\n lines += res.lines; // save cursor state, we could require to rollback later\n\n destEndPos = pos;\n destEndLineNo = lines; // [label]: destination 'title'\n // ^^^ skipping those spaces\n\n start = pos;\n\n for (; pos < max; pos++) {\n ch = str.charCodeAt(pos);\n\n if (ch === 0x0A) {\n lines++;\n } else if (isSpace(ch)) {\n /*eslint no-empty:0*/\n } else {\n break;\n }\n } // [label]: destination 'title'\n // ^^^^^^^ parse this\n\n\n res = state.md.helpers.parseLinkTitle(str, pos, max);\n\n if (pos < max && start !== pos && res.ok) {\n title = res.str;\n pos = res.pos;\n lines += res.lines;\n } else {\n title = '';\n pos = destEndPos;\n lines = destEndLineNo;\n } // skip trailing spaces until the rest of the line\n\n\n while (pos < max) {\n ch = str.charCodeAt(pos);\n\n if (!isSpace(ch)) {\n break;\n }\n\n pos++;\n }\n\n if (pos < max && str.charCodeAt(pos) !== 0x0A) {\n if (title) {\n // garbage at the end of the line after title,\n // but it could still be a valid reference if we roll back\n title = '';\n pos = destEndPos;\n lines = destEndLineNo;\n\n while (pos < max) {\n ch = str.charCodeAt(pos);\n\n if (!isSpace(ch)) {\n break;\n }\n\n pos++;\n }\n }\n }\n\n if (pos < max && str.charCodeAt(pos) !== 0x0A) {\n // garbage at the end of the line\n return false;\n }\n\n label = normalizeReference(str.slice(1, labelEnd));\n\n if (!label) {\n // CommonMark 0.20 disallows empty labels\n return false;\n } // Reference can not terminate anything. 
This check is for safety only.\n\n /*istanbul ignore if*/\n\n\n if (silent) {\n return true;\n }\n\n if (typeof state.env.references === 'undefined') {\n state.env.references = {};\n }\n\n if (typeof state.env.references[label] === 'undefined') {\n state.env.references[label] = {\n title: title,\n href: href\n };\n }\n\n state.parentType = oldParentType;\n state.line = startLine + lines + 1;\n return true;\n};","// HTML block\n'use strict';\n\nvar block_names = require('../common/html_blocks');\n\nvar HTML_OPEN_CLOSE_TAG_RE = require('../common/html_re').HTML_OPEN_CLOSE_TAG_RE; // An array of opening and corresponding closing sequences for html tags,\n// last argument defines whether it can terminate a paragraph or not\n//\n\n\nvar HTML_SEQUENCES = [[/^<(script|pre|style|textarea)(?=(\\s|>|$))/i, /<\\/(script|pre|style|textarea)>/i, true], [/^<!--/, /-->/, true], [/^<\\?/, /\\?>/, true], [/^<![A-Z]/, />/, true], [/^<!\\[CDATA\\[/, /\\]\\]>/, true], [new RegExp('^</?(' + block_names.join('|') + ')(?=(\\\\s|/?>|$))', 'i'), /^$/, true], [new RegExp(HTML_OPEN_CLOSE_TAG_RE.source + '\\\\s*$'), /^$/, false]];\n\nmodule.exports = function html_block(state, startLine, endLine, silent) {\n var i,\n nextLine,\n token,\n lineText,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine]; // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n }\n\n if (!state.md.options.html) {\n return false;\n }\n\n if (state.src.charCodeAt(pos) !== 0x3C\n /* < */\n ) {\n return false;\n }\n\n lineText = state.src.slice(pos, max);\n\n for (i = 0; i < HTML_SEQUENCES.length; i++) {\n if (HTML_SEQUENCES[i][0].test(lineText)) {\n break;\n }\n }\n\n if (i === HTML_SEQUENCES.length) {\n return false;\n }\n\n if (silent) {\n // true if this sequence can be a terminator, false otherwise\n return HTML_SEQUENCES[i][2];\n }\n\n nextLine = startLine + 1; // If we are here - we detected HTML block.\n // Let's roll down till block end.\n\n if (!HTML_SEQUENCES[i][1].test(lineText)) {\n for (; nextLine < endLine; nextLine++) {\n if (state.sCount[nextLine] < state.blkIndent) {\n break;\n }\n\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n lineText = state.src.slice(pos, max);\n\n if (HTML_SEQUENCES[i][1].test(lineText)) {\n if (lineText.length !== 0) {\n nextLine++;\n }\n\n break;\n }\n }\n }\n\n state.line = nextLine;\n token = state.push('html_block', '', 0);\n token.map = [startLine, nextLine];\n token.content = state.getLines(startLine, nextLine, state.blkIndent, true);\n return true;\n};","// List of valid html blocks names, according to commonmark spec\n// http://jgm.github.io/CommonMark/spec.html#html-blocks\n'use strict';\n\nmodule.exports = ['address', 'article', 'aside', 'base', 'basefont', 'blockquote', 'body', 'caption', 'center', 'col', 'colgroup', 'dd', 'details', 'dialog', 'dir', 'div', 'dl', 'dt', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'header', 'hr', 'html', 'iframe', 'legend', 'li', 'link', 'main', 'menu', 'menuitem', 'nav', 'noframes', 'ol', 'optgroup', 'option', 'p', 'param', 'section', 'source', 'summary', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'title', 'tr', 'track', 'ul'];","// heading (#, ##, ...)\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\nmodule.exports = function heading(state, startLine, endLine, silent) {\n var ch,\n level,\n tmp,\n token,\n pos = state.bMarks[startLine] + 
state.tShift[startLine],\n max = state.eMarks[startLine]; // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n }\n\n ch = state.src.charCodeAt(pos);\n\n if (ch !== 0x23\n /* # */\n || pos >= max) {\n return false;\n } // count heading level\n\n\n level = 1;\n ch = state.src.charCodeAt(++pos);\n\n while (ch === 0x23\n /* # */\n && pos < max && level <= 6) {\n level++;\n ch = state.src.charCodeAt(++pos);\n }\n\n if (level > 6 || pos < max && !isSpace(ch)) {\n return false;\n }\n\n if (silent) {\n return true;\n } // Let's cut tails like ' ### ' from the end of string\n\n\n max = state.skipSpacesBack(max, pos);\n tmp = state.skipCharsBack(max, 0x23, pos); // #\n\n if (tmp > pos && isSpace(state.src.charCodeAt(tmp - 1))) {\n max = tmp;\n }\n\n state.line = startLine + 1;\n token = state.push('heading_open', 'h' + String(level), 1);\n token.markup = '########'.slice(0, level);\n token.map = [startLine, state.line];\n token = state.push('inline', '', 0);\n token.content = state.src.slice(pos, max).trim();\n token.map = [startLine, state.line];\n token.children = [];\n token = state.push('heading_close', 'h' + String(level), -1);\n token.markup = '########'.slice(0, level);\n return true;\n};","// lheading (---, ===)\n'use strict';\n\nmodule.exports = function lheading(state, startLine, endLine\n/*, silent*/\n) {\n var content,\n terminate,\n i,\n l,\n token,\n pos,\n max,\n level,\n marker,\n nextLine = startLine + 1,\n oldParentType,\n terminatorRules = state.md.block.ruler.getRules('paragraph'); // if it's indented more than 3 spaces, it should be a code block\n\n if (state.sCount[startLine] - state.blkIndent >= 4) {\n return false;\n }\n\n oldParentType = state.parentType;\n state.parentType = 'paragraph'; // use paragraph to match terminatorRules\n // jump line-by-line until empty one or EOF\n\n for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {\n // this would be a code block normally, but after paragraph\n // it's considered a lazy continuation regardless of what's there\n if (state.sCount[nextLine] - state.blkIndent > 3) {\n continue;\n } //\n // Check for underline in setext header\n //\n\n\n if (state.sCount[nextLine] >= state.blkIndent) {\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n\n if (pos < max) {\n marker = state.src.charCodeAt(pos);\n\n if (marker === 0x2D\n /* - */\n || marker === 0x3D\n /* = */\n ) {\n pos = state.skipChars(pos, marker);\n pos = state.skipSpaces(pos);\n\n if (pos >= max) {\n level = marker === 0x3D\n /* = */\n ? 
1 : 2;\n break;\n }\n }\n }\n } // quirk for blockquotes, this line should already be checked by that rule\n\n\n if (state.sCount[nextLine] < 0) {\n continue;\n } // Some tags can terminate paragraph without empty line.\n\n\n terminate = false;\n\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) {\n break;\n }\n }\n\n if (!level) {\n // Didn't find valid underline\n return false;\n }\n\n content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();\n state.line = nextLine + 1;\n token = state.push('heading_open', 'h' + String(level), 1);\n token.markup = String.fromCharCode(marker);\n token.map = [startLine, state.line];\n token = state.push('inline', '', 0);\n token.content = content;\n token.map = [startLine, state.line - 1];\n token.children = [];\n token = state.push('heading_close', 'h' + String(level), -1);\n token.markup = String.fromCharCode(marker);\n state.parentType = oldParentType;\n return true;\n};","// Paragraph\n'use strict';\n\nmodule.exports = function paragraph(state, startLine, endLine) {\n var content,\n terminate,\n i,\n l,\n token,\n oldParentType,\n nextLine = startLine + 1,\n terminatorRules = state.md.block.ruler.getRules('paragraph');\n oldParentType = state.parentType;\n state.parentType = 'paragraph'; // jump line-by-line until empty one or EOF\n\n for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {\n // this would be a code block normally, but after paragraph\n // it's considered a lazy continuation regardless of what's there\n if (state.sCount[nextLine] - state.blkIndent > 3) {\n continue;\n } // quirk for blockquotes, this line should already be checked by that rule\n\n\n if (state.sCount[nextLine] < 0) {\n continue;\n } // Some tags can terminate paragraph without empty line.\n\n\n terminate = false;\n\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) {\n break;\n }\n }\n\n content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();\n state.line = nextLine;\n token = state.push('paragraph_open', 'p', 1);\n token.map = [startLine, state.line];\n token = state.push('inline', '', 0);\n token.content = content;\n token.map = [startLine, state.line];\n token.children = [];\n token = state.push('paragraph_close', 'p', -1);\n state.parentType = oldParentType;\n return true;\n};","// Parser state class\n'use strict';\n\nvar Token = require('../token');\n\nvar isSpace = require('../common/utils').isSpace;\n\nfunction StateBlock(src, md, env, tokens) {\n var ch, s, start, pos, len, indent, offset, indent_found;\n this.src = src; // link to parser instance\n\n this.md = md;\n this.env = env; //\n // Internal state vartiables\n //\n\n this.tokens = tokens;\n this.bMarks = []; // line begin offsets for fast jumps\n\n this.eMarks = []; // line end offsets for fast jumps\n\n this.tShift = []; // offsets of the first non-space characters (tabs not expanded)\n\n this.sCount = []; // indents for each line (tabs expanded)\n // An amount of virtual spaces (tabs expanded) between beginning\n // of each line (bMarks) and real beginning of that line.\n //\n // It exists only as a hack because blockquotes override bMarks\n // losing information in the process.\n //\n // It's used only when expanding tabs, you can think about it as\n // an initial tab length, e.g. 
bsCount=21 applied to string `\\t123`\n // means first tab should be expanded to 4-21%4 === 3 spaces.\n //\n\n this.bsCount = []; // block parser variables\n\n this.blkIndent = 0; // required block content indent (for example, if we are\n // inside a list, it would be positioned after list marker)\n\n this.line = 0; // line index in src\n\n this.lineMax = 0; // lines count\n\n this.tight = false; // loose/tight mode for lists\n\n this.ddIndent = -1; // indent of the current dd block (-1 if there isn't any)\n\n this.listIndent = -1; // indent of the current list block (-1 if there isn't any)\n // can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'\n // used in lists to determine if they interrupt a paragraph\n\n this.parentType = 'root';\n this.level = 0; // renderer\n\n this.result = ''; // Create caches\n // Generate markers.\n\n s = this.src;\n indent_found = false;\n\n for (start = pos = indent = offset = 0, len = s.length; pos < len; pos++) {\n ch = s.charCodeAt(pos);\n\n if (!indent_found) {\n if (isSpace(ch)) {\n indent++;\n\n if (ch === 0x09) {\n offset += 4 - offset % 4;\n } else {\n offset++;\n }\n\n continue;\n } else {\n indent_found = true;\n }\n }\n\n if (ch === 0x0A || pos === len - 1) {\n if (ch !== 0x0A) {\n pos++;\n }\n\n this.bMarks.push(start);\n this.eMarks.push(pos);\n this.tShift.push(indent);\n this.sCount.push(offset);\n this.bsCount.push(0);\n indent_found = false;\n indent = 0;\n offset = 0;\n start = pos + 1;\n }\n } // Push fake entry to simplify cache bounds checks\n\n\n this.bMarks.push(s.length);\n this.eMarks.push(s.length);\n this.tShift.push(0);\n this.sCount.push(0);\n this.bsCount.push(0);\n this.lineMax = this.bMarks.length - 1; // don't count last fake line\n} // Push new token to \"stream\".\n//\n\n\nStateBlock.prototype.push = function (type, tag, nesting) {\n var token = new Token(type, tag, nesting);\n token.block = true;\n if (nesting < 0) this.level--; // closing tag\n\n token.level = this.level;\n if (nesting > 0) this.level++; // opening tag\n\n this.tokens.push(token);\n return token;\n};\n\nStateBlock.prototype.isEmpty = function isEmpty(line) {\n return this.bMarks[line] + this.tShift[line] >= this.eMarks[line];\n};\n\nStateBlock.prototype.skipEmptyLines = function skipEmptyLines(from) {\n for (var max = this.lineMax; from < max; from++) {\n if (this.bMarks[from] + this.tShift[from] < this.eMarks[from]) {\n break;\n }\n }\n\n return from;\n}; // Skip spaces from given position.\n\n\nStateBlock.prototype.skipSpaces = function skipSpaces(pos) {\n var ch;\n\n for (var max = this.src.length; pos < max; pos++) {\n ch = this.src.charCodeAt(pos);\n\n if (!isSpace(ch)) {\n break;\n }\n }\n\n return pos;\n}; // Skip spaces from given position in reverse.\n\n\nStateBlock.prototype.skipSpacesBack = function skipSpacesBack(pos, min) {\n if (pos <= min) {\n return pos;\n }\n\n while (pos > min) {\n if (!isSpace(this.src.charCodeAt(--pos))) {\n return pos + 1;\n }\n }\n\n return pos;\n}; // Skip char codes from given position\n\n\nStateBlock.prototype.skipChars = function skipChars(pos, code) {\n for (var max = this.src.length; pos < max; pos++) {\n if (this.src.charCodeAt(pos) !== code) {\n break;\n }\n }\n\n return pos;\n}; // Skip char codes reverse from given position - 1\n\n\nStateBlock.prototype.skipCharsBack = function skipCharsBack(pos, code, min) {\n if (pos <= min) {\n return pos;\n }\n\n while (pos > min) {\n if (code !== this.src.charCodeAt(--pos)) {\n return pos + 1;\n }\n }\n\n return pos;\n}; // cut lines range from 
source.\n\n\nStateBlock.prototype.getLines = function getLines(begin, end, indent, keepLastLF) {\n var i,\n lineIndent,\n ch,\n first,\n last,\n queue,\n lineStart,\n line = begin;\n\n if (begin >= end) {\n return '';\n }\n\n queue = new Array(end - begin);\n\n for (i = 0; line < end; line++, i++) {\n lineIndent = 0;\n lineStart = first = this.bMarks[line];\n\n if (line + 1 < end || keepLastLF) {\n // No need for bounds check because we have fake entry on tail.\n last = this.eMarks[line] + 1;\n } else {\n last = this.eMarks[line];\n }\n\n while (first < last && lineIndent < indent) {\n ch = this.src.charCodeAt(first);\n\n if (isSpace(ch)) {\n if (ch === 0x09) {\n lineIndent += 4 - (lineIndent + this.bsCount[line]) % 4;\n } else {\n lineIndent++;\n }\n } else if (first - lineStart < this.tShift[line]) {\n // patched tShift masked characters to look like spaces (blockquotes, list markers)\n lineIndent++;\n } else {\n break;\n }\n\n first++;\n }\n\n if (lineIndent > indent) {\n // partially expanding tabs in code blocks, e.g '\\t\\tfoobar'\n // with indent=2 becomes ' \\tfoobar'\n queue[i] = new Array(lineIndent - indent + 1).join(' ') + this.src.slice(first, last);\n } else {\n queue[i] = this.src.slice(first, last);\n }\n }\n\n return queue.join('');\n}; // re-export Token class to use in block rules\n\n\nStateBlock.prototype.Token = Token;\nmodule.exports = StateBlock;","/** internal\n * class ParserInline\n *\n * Tokenizes paragraph content.\n **/\n'use strict';\n\nvar Ruler = require('./ruler'); ////////////////////////////////////////////////////////////////////////////////\n// Parser rules\n\n\nvar _rules = [['text', require('./rules_inline/text')], ['linkify', require('./rules_inline/linkify')], ['newline', require('./rules_inline/newline')], ['escape', require('./rules_inline/escape')], ['backticks', require('./rules_inline/backticks')], ['strikethrough', require('./rules_inline/strikethrough').tokenize], ['emphasis', require('./rules_inline/emphasis').tokenize], ['link', require('./rules_inline/link')], ['image', require('./rules_inline/image')], ['autolink', require('./rules_inline/autolink')], ['html_inline', require('./rules_inline/html_inline')], ['entity', require('./rules_inline/entity')]]; // `rule2` ruleset was created specifically for emphasis/strikethrough\n// post-processing and may be changed in the future.\n//\n// Don't use this for anything except pairs (plugins working with `balance_pairs`).\n//\n\nvar _rules2 = [['balance_pairs', require('./rules_inline/balance_pairs')], ['strikethrough', require('./rules_inline/strikethrough').postProcess], ['emphasis', require('./rules_inline/emphasis').postProcess], // rules for pairs separate '**' into its own text tokens, which may be left unused,\n// rule below merges unused segments back with the rest of the text\n['fragments_join', require('./rules_inline/fragments_join')]];\n/**\n * new ParserInline()\n **/\n\nfunction ParserInline() {\n var i;\n /**\n * ParserInline#ruler -> Ruler\n *\n * [[Ruler]] instance. Keep configuration of inline rules.\n **/\n\n this.ruler = new Ruler();\n\n for (i = 0; i < _rules.length; i++) {\n this.ruler.push(_rules[i][0], _rules[i][1]);\n }\n /**\n * ParserInline#ruler2 -> Ruler\n *\n * [[Ruler]] instance. Second ruler used for post-processing\n * (e.g. 
in emphasis-like rules).\n **/\n\n\n this.ruler2 = new Ruler();\n\n for (i = 0; i < _rules2.length; i++) {\n this.ruler2.push(_rules2[i][0], _rules2[i][1]);\n }\n} // Skip single token by running all rules in validation mode;\n// returns `true` if any rule reported success\n//\n\n\nParserInline.prototype.skipToken = function (state) {\n var ok,\n i,\n pos = state.pos,\n rules = this.ruler.getRules(''),\n len = rules.length,\n maxNesting = state.md.options.maxNesting,\n cache = state.cache;\n\n if (typeof cache[pos] !== 'undefined') {\n state.pos = cache[pos];\n return;\n }\n\n if (state.level < maxNesting) {\n for (i = 0; i < len; i++) {\n // Increment state.level and decrement it later to limit recursion.\n // It's harmless to do here, because no tokens are created. But ideally,\n // we'd need a separate private state variable for this purpose.\n //\n state.level++;\n ok = rules[i](state, true);\n state.level--;\n\n if (ok) {\n if (pos >= state.pos) {\n throw new Error(\"inline rule didn't increment state.pos\");\n }\n\n break;\n }\n }\n } else {\n // Too much nesting, just skip until the end of the paragraph.\n //\n // NOTE: this will cause links to behave incorrectly in the following case,\n // when an amount of `[` is exactly equal to `maxNesting + 1`:\n //\n // [[[[[[[[[[[[[[[[[[[[[foo]()\n //\n // TODO: remove this workaround when CM standard will allow nested links\n // (we can replace it by preventing links from being parsed in\n // validation mode)\n //\n state.pos = state.posMax;\n }\n\n if (!ok) {\n state.pos++;\n }\n\n cache[pos] = state.pos;\n}; // Generate tokens for input range\n//\n\n\nParserInline.prototype.tokenize = function (state) {\n var ok,\n i,\n prevPos,\n rules = this.ruler.getRules(''),\n len = rules.length,\n end = state.posMax,\n maxNesting = state.md.options.maxNesting;\n\n while (state.pos < end) {\n // Try all possible rules.\n // On success, rule should:\n //\n // - update `state.pos`\n // - update `state.tokens`\n // - return true\n prevPos = state.pos;\n\n if (state.level < maxNesting) {\n for (i = 0; i < len; i++) {\n ok = rules[i](state, false);\n\n if (ok) {\n if (prevPos >= state.pos) {\n throw new Error(\"inline rule didn't increment state.pos\");\n }\n\n break;\n }\n }\n }\n\n if (ok) {\n if (state.pos >= end) {\n break;\n }\n\n continue;\n }\n\n state.pending += state.src[state.pos++];\n }\n\n if (state.pending) {\n state.pushPending();\n }\n};\n/**\n * ParserInline.parse(str, md, env, outTokens)\n *\n * Process input string and push inline tokens into `outTokens`\n **/\n\n\nParserInline.prototype.parse = function (str, md, env, outTokens) {\n var i, rules, len;\n var state = new this.State(str, md, env, outTokens);\n this.tokenize(state);\n rules = this.ruler2.getRules('');\n len = rules.length;\n\n for (i = 0; i < len; i++) {\n rules[i](state);\n }\n};\n\nParserInline.prototype.State = require('./rules_inline/state_inline');\nmodule.exports = ParserInline;","// Skip text characters for text token, place those to pending buffer\n// and increment current pos\n'use strict'; // Rule to skip pure text\n// '{}$%@~+=:' reserved for extentions\n// !, \", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \\, ], ^, _, `, {, |, }, or ~\n// !!!! Don't confuse with \"Markdown ASCII Punctuation\" chars\n// http://spec.commonmark.org/0.15/#ascii-punctuation-character\n\nfunction isTerminatorChar(ch) {\n switch (ch) {\n case 0x0A\n /* \\n */\n :\n case 0x21\n /* ! 
*/\n :\n case 0x23\n /* # */\n :\n case 0x24\n /* $ */\n :\n case 0x25\n /* % */\n :\n case 0x26\n /* & */\n :\n case 0x2A\n /* * */\n :\n case 0x2B\n /* + */\n :\n case 0x2D\n /* - */\n :\n case 0x3A\n /* : */\n :\n case 0x3C\n /* < */\n :\n case 0x3D\n /* = */\n :\n case 0x3E\n /* > */\n :\n case 0x40\n /* @ */\n :\n case 0x5B\n /* [ */\n :\n case 0x5C\n /* \\ */\n :\n case 0x5D\n /* ] */\n :\n case 0x5E\n /* ^ */\n :\n case 0x5F\n /* _ */\n :\n case 0x60\n /* ` */\n :\n case 0x7B\n /* { */\n :\n case 0x7D\n /* } */\n :\n case 0x7E\n /* ~ */\n :\n return true;\n\n default:\n return false;\n }\n}\n\nmodule.exports = function text(state, silent) {\n var pos = state.pos;\n\n while (pos < state.posMax && !isTerminatorChar(state.src.charCodeAt(pos))) {\n pos++;\n }\n\n if (pos === state.pos) {\n return false;\n }\n\n if (!silent) {\n state.pending += state.src.slice(state.pos, pos);\n }\n\n state.pos = pos;\n return true;\n}; // Alternative implementation, for memory.\n//\n// It costs 10% of performance, but allows extend terminators list, if place it\n// to `ParcerInline` property. Probably, will switch to it sometime, such\n// flexibility required.\n\n/*\nvar TERMINATOR_RE = /[\\n!#$%&*+\\-:<=>@[\\\\\\]^_`{}~]/;\n\nmodule.exports = function text(state, silent) {\n var pos = state.pos,\n idx = state.src.slice(pos).search(TERMINATOR_RE);\n\n // first char is terminator -> empty text\n if (idx === 0) { return false; }\n\n // no terminator -> text till end of string\n if (idx < 0) {\n if (!silent) { state.pending += state.src.slice(pos); }\n state.pos = state.src.length;\n return true;\n }\n\n if (!silent) { state.pending += state.src.slice(pos, pos + idx); }\n\n state.pos += idx;\n\n return true;\n};*/","// Process links like https://example.org/\n'use strict'; // RFC3986: scheme = ALPHA *( ALPHA / DIGIT / \"+\" / \"-\" / \".\" )\n\nvar SCHEME_RE = /(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$/i;\n\nmodule.exports = function linkify(state, silent) {\n var pos, max, match, proto, link, url, fullUrl, token;\n if (!state.md.options.linkify) return false;\n if (state.linkLevel > 0) return false;\n pos = state.pos;\n max = state.posMax;\n if (pos + 3 > max) return false;\n if (state.src.charCodeAt(pos) !== 0x3A\n /* : */\n ) return false;\n if (state.src.charCodeAt(pos + 1) !== 0x2F\n /* / */\n ) return false;\n if (state.src.charCodeAt(pos + 2) !== 0x2F\n /* / */\n ) return false;\n match = state.pending.match(SCHEME_RE);\n if (!match) return false;\n proto = match[1];\n link = state.md.linkify.matchAtStart(state.src.slice(pos - proto.length));\n if (!link) return false;\n url = link.url; // invalid link, but still detected by linkify somehow;\n // need to check to prevent infinite loop below\n\n if (url.length <= proto.length) return false; // disallow '*' at the end of the link (conflicts with emphasis)\n\n url = url.replace(/\\*+$/, '');\n fullUrl = state.md.normalizeLink(url);\n if (!state.md.validateLink(fullUrl)) return false;\n\n if (!silent) {\n state.pending = state.pending.slice(0, -proto.length);\n token = state.push('link_open', 'a', 1);\n token.attrs = [['href', fullUrl]];\n token.markup = 'linkify';\n token.info = 'auto';\n token = state.push('text', '', 0);\n token.content = state.md.normalizeLinkText(url);\n token = state.push('link_close', 'a', -1);\n token.markup = 'linkify';\n token.info = 'auto';\n }\n\n state.pos += url.length - proto.length;\n return true;\n};","/* globals __VUE_SSR_CONTEXT__ */\n\n// IMPORTANT: Do NOT use ES2015 features in this file (except for modules).\n// This 
module is a runtime utility for cleaner component module output and will\n// be included in the final webpack user bundle.\n\nexport default function normalizeComponent(\n scriptExports,\n render,\n staticRenderFns,\n functionalTemplate,\n injectStyles,\n scopeId,\n moduleIdentifier /* server only */,\n shadowMode /* vue-cli only */\n) {\n // Vue.extend constructor export interop\n var options =\n typeof scriptExports === 'function' ? scriptExports.options : scriptExports\n\n // render functions\n if (render) {\n options.render = render\n options.staticRenderFns = staticRenderFns\n options._compiled = true\n }\n\n // functional template\n if (functionalTemplate) {\n options.functional = true\n }\n\n // scopedId\n if (scopeId) {\n options._scopeId = 'data-v-' + scopeId\n }\n\n var hook\n if (moduleIdentifier) {\n // server build\n hook = function (context) {\n // 2.3 injection\n context =\n context || // cached call\n (this.$vnode && this.$vnode.ssrContext) || // stateful\n (this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional\n // 2.2 with runInNewContext: true\n if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') {\n context = __VUE_SSR_CONTEXT__\n }\n // inject component styles\n if (injectStyles) {\n injectStyles.call(this, context)\n }\n // register component module identifier for async chunk inferrence\n if (context && context._registeredComponents) {\n context._registeredComponents.add(moduleIdentifier)\n }\n }\n // used by ssr in case component is cached and beforeCreate\n // never gets called\n options._ssrRegister = hook\n } else if (injectStyles) {\n hook = shadowMode\n ? function () {\n injectStyles.call(\n this,\n (options.functional ? this.parent : this).$root.$options.shadowRoot\n )\n }\n : injectStyles\n }\n\n if (hook) {\n if (options.functional) {\n // for template-only hot-reload because in that case the render fn doesn't\n // go through the normalizer\n options._injectStyles = hook\n // register for functional component in vue file\n var originalRender = options.render\n options.render = function renderWithStyleInjection(h, context) {\n hook.call(context)\n return originalRender(h, context)\n }\n } else {\n // inject component registration as beforeCreate hook\n var existing = options.beforeCreate\n options.beforeCreate = existing ? [].concat(existing, hook) : [hook]\n }\n }\n\n return {\n exports: scriptExports,\n options: options\n }\n}\n","// Proceess '\\n'\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\nmodule.exports = function newline(state, silent) {\n var pmax,\n max,\n ws,\n pos = state.pos;\n\n if (state.src.charCodeAt(pos) !== 0x0A\n /* \\n */\n ) {\n return false;\n }\n\n pmax = state.pending.length - 1;\n max = state.posMax; // ' \\n' -> hardbreak\n // Lookup in pending chars is bad practice! 
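As an illustrative aside (not part of the bundled source): the `newline` rule here emits a hard break when the pending text ends with two or more spaces before the line break, and a soft break otherwise. A minimal sketch via the public API, with expected output shown in comments:

```javascript
var md = require('markdown-it')();

// Two trailing spaces before the newline -> hard break
console.log(md.render('foo  \nbar')); // '<p>foo<br>\nbar</p>\n'

// Plain newline -> soft break (rendered as '\n' by default)
console.log(md.render('foo\nbar'));   // '<p>foo\nbar</p>\n'

// With the `breaks` option enabled, soft breaks also render as <br>
console.log(md.set({ breaks: true }).render('foo\nbar')); // '<p>foo<br>\nbar</p>\n'
```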
Don't copy to other rules!\n // Pending string is stored in concat mode, indexed lookups will cause\n // convertion to flat mode.\n\n if (!silent) {\n if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) {\n if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) {\n // Find whitespaces tail of pending chars.\n ws = pmax - 1;\n\n while (ws >= 1 && state.pending.charCodeAt(ws - 1) === 0x20) {\n ws--;\n }\n\n state.pending = state.pending.slice(0, ws);\n state.push('hardbreak', 'br', 0);\n } else {\n state.pending = state.pending.slice(0, -1);\n state.push('softbreak', 'br', 0);\n }\n } else {\n state.push('softbreak', 'br', 0);\n }\n }\n\n pos++; // skip heading spaces for next line\n\n while (pos < max && isSpace(state.src.charCodeAt(pos))) {\n pos++;\n }\n\n state.pos = pos;\n return true;\n};","// Process escaped chars and hardbreaks\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\nvar ESCAPED = [];\n\nfor (var i = 0; i < 256; i++) {\n ESCAPED.push(0);\n}\n\n'\\\\!\"#$%&\\'()*+,./:;<=>?@[]^_`{|}~-'.split('').forEach(function (ch) {\n ESCAPED[ch.charCodeAt(0)] = 1;\n});\n\nmodule.exports = function escape(state, silent) {\n var ch1,\n ch2,\n origStr,\n escapedStr,\n token,\n pos = state.pos,\n max = state.posMax;\n if (state.src.charCodeAt(pos) !== 0x5C\n /* \\ */\n ) return false;\n pos++; // '\\' at the end of the inline block\n\n if (pos >= max) return false;\n ch1 = state.src.charCodeAt(pos);\n\n if (ch1 === 0x0A) {\n if (!silent) {\n state.push('hardbreak', 'br', 0);\n }\n\n pos++; // skip leading whitespaces from next line\n\n while (pos < max) {\n ch1 = state.src.charCodeAt(pos);\n if (!isSpace(ch1)) break;\n pos++;\n }\n\n state.pos = pos;\n return true;\n }\n\n escapedStr = state.src[pos];\n\n if (ch1 >= 0xD800 && ch1 <= 0xDBFF && pos + 1 < max) {\n ch2 = state.src.charCodeAt(pos + 1);\n\n if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {\n escapedStr += state.src[pos + 1];\n pos++;\n }\n }\n\n origStr = '\\\\' + escapedStr;\n\n if (!silent) {\n token = state.push('text_special', '', 0);\n\n if (ch1 < 256 && ESCAPED[ch1] !== 0) {\n token.content = escapedStr;\n } else {\n token.content = origStr;\n }\n\n token.markup = origStr;\n token.info = 'escape';\n }\n\n state.pos = pos + 1;\n return true;\n};","// Parse backticks\n'use strict';\n\nmodule.exports = function backtick(state, silent) {\n var start,\n max,\n marker,\n token,\n matchStart,\n matchEnd,\n openerLength,\n closerLength,\n pos = state.pos,\n ch = state.src.charCodeAt(pos);\n\n if (ch !== 0x60\n /* ` */\n ) {\n return false;\n }\n\n start = pos;\n pos++;\n max = state.posMax; // scan marker length\n\n while (pos < max && state.src.charCodeAt(pos) === 0x60\n /* ` */\n ) {\n pos++;\n }\n\n marker = state.src.slice(start, pos);\n openerLength = marker.length;\n\n if (state.backticksScanned && (state.backticks[openerLength] || 0) <= start) {\n if (!silent) state.pending += marker;\n state.pos += openerLength;\n return true;\n }\n\n matchEnd = pos; // Nothing found in the cache, scan until the end of the line (or until marker is found)\n\n while ((matchStart = state.src.indexOf('`', matchEnd)) !== -1) {\n matchEnd = matchStart + 1; // scan marker length\n\n while (matchEnd < max && state.src.charCodeAt(matchEnd) === 0x60\n /* ` */\n ) {\n matchEnd++;\n }\n\n closerLength = matchEnd - matchStart;\n\n if (closerLength === openerLength) {\n // Found matching closer length.\n if (!silent) {\n token = state.push('code_inline', 'code', 0);\n token.markup = marker;\n token.content = state.src.slice(pos, 
matchStart).replace(/\\n/g, ' ').replace(/^ (.+) $/, '$1');\n }\n\n state.pos = matchEnd;\n return true;\n } // Some different length found, put it in cache as upper limit of where closer can be found\n\n\n state.backticks[closerLength] = matchStart;\n } // Scanned through the end, didn't find anything\n\n\n state.backticksScanned = true;\n if (!silent) state.pending += marker;\n state.pos += openerLength;\n return true;\n};","// Process [link]( \"stuff\")\n'use strict';\n\nvar normalizeReference = require('../common/utils').normalizeReference;\n\nvar isSpace = require('../common/utils').isSpace;\n\nmodule.exports = function link(state, silent) {\n var attrs,\n code,\n label,\n labelEnd,\n labelStart,\n pos,\n res,\n ref,\n token,\n href = '',\n title = '',\n oldPos = state.pos,\n max = state.posMax,\n start = state.pos,\n parseReference = true;\n\n if (state.src.charCodeAt(state.pos) !== 0x5B\n /* [ */\n ) {\n return false;\n }\n\n labelStart = state.pos + 1;\n labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, true); // parser failed to find ']', so it's not a valid link\n\n if (labelEnd < 0) {\n return false;\n }\n\n pos = labelEnd + 1;\n\n if (pos < max && state.src.charCodeAt(pos) === 0x28\n /* ( */\n ) {\n //\n // Inline link\n //\n // might have found a valid shortcut link, disable reference parsing\n parseReference = false; // [link]( \"title\" )\n // ^^ skipping these spaces\n\n pos++;\n\n for (; pos < max; pos++) {\n code = state.src.charCodeAt(pos);\n\n if (!isSpace(code) && code !== 0x0A) {\n break;\n }\n }\n\n if (pos >= max) {\n return false;\n } // [link]( \"title\" )\n // ^^^^^^ parsing link destination\n\n\n start = pos;\n res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax);\n\n if (res.ok) {\n href = state.md.normalizeLink(res.str);\n\n if (state.md.validateLink(href)) {\n pos = res.pos;\n } else {\n href = '';\n } // [link]( \"title\" )\n // ^^ skipping these spaces\n\n\n start = pos;\n\n for (; pos < max; pos++) {\n code = state.src.charCodeAt(pos);\n\n if (!isSpace(code) && code !== 0x0A) {\n break;\n }\n } // [link]( \"title\" )\n // ^^^^^^^ parsing link title\n\n\n res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax);\n\n if (pos < max && start !== pos && res.ok) {\n title = res.str;\n pos = res.pos; // [link]( \"title\" )\n // ^^ skipping these spaces\n\n for (; pos < max; pos++) {\n code = state.src.charCodeAt(pos);\n\n if (!isSpace(code) && code !== 0x0A) {\n break;\n }\n }\n }\n }\n\n if (pos >= max || state.src.charCodeAt(pos) !== 0x29\n /* ) */\n ) {\n // parsing a valid shortcut link failed, fallback to reference\n parseReference = true;\n }\n\n pos++;\n }\n\n if (parseReference) {\n //\n // Link reference\n //\n if (typeof state.env.references === 'undefined') {\n return false;\n }\n\n if (pos < max && state.src.charCodeAt(pos) === 0x5B\n /* [ */\n ) {\n start = pos + 1;\n pos = state.md.helpers.parseLinkLabel(state, pos);\n\n if (pos >= 0) {\n label = state.src.slice(start, pos++);\n } else {\n pos = labelEnd + 1;\n }\n } else {\n pos = labelEnd + 1;\n } // covers label === '' and label === undefined\n // (collapsed reference link and shortcut reference link respectively)\n\n\n if (!label) {\n label = state.src.slice(labelStart, labelEnd);\n }\n\n ref = state.env.references[normalizeReference(label)];\n\n if (!ref) {\n state.pos = oldPos;\n return false;\n }\n\n href = ref.href;\n title = ref.title;\n } //\n // We found the end of the link, and know for a fact it's a valid link;\n // so all that's left to do is 
to call tokenizer.\n //\n\n\n if (!silent) {\n state.pos = labelStart;\n state.posMax = labelEnd;\n token = state.push('link_open', 'a', 1);\n token.attrs = attrs = [['href', href]];\n\n if (title) {\n attrs.push(['title', title]);\n }\n\n state.linkLevel++;\n state.md.inline.tokenize(state);\n state.linkLevel--;\n token = state.push('link_close', 'a', -1);\n }\n\n state.pos = pos;\n state.posMax = max;\n return true;\n};","// Process \n'use strict';\n\nvar normalizeReference = require('../common/utils').normalizeReference;\n\nvar isSpace = require('../common/utils').isSpace;\n\nmodule.exports = function image(state, silent) {\n var attrs,\n code,\n content,\n label,\n labelEnd,\n labelStart,\n pos,\n ref,\n res,\n title,\n token,\n tokens,\n start,\n href = '',\n oldPos = state.pos,\n max = state.posMax;\n\n if (state.src.charCodeAt(state.pos) !== 0x21\n /* ! */\n ) {\n return false;\n }\n\n if (state.src.charCodeAt(state.pos + 1) !== 0x5B\n /* [ */\n ) {\n return false;\n }\n\n labelStart = state.pos + 2;\n labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, false); // parser failed to find ']', so it's not a valid link\n\n if (labelEnd < 0) {\n return false;\n }\n\n pos = labelEnd + 1;\n\n if (pos < max && state.src.charCodeAt(pos) === 0x28\n /* ( */\n ) {\n //\n // Inline link\n //\n // [link]( \"title\" )\n // ^^ skipping these spaces\n pos++;\n\n for (; pos < max; pos++) {\n code = state.src.charCodeAt(pos);\n\n if (!isSpace(code) && code !== 0x0A) {\n break;\n }\n }\n\n if (pos >= max) {\n return false;\n } // [link]( \"title\" )\n // ^^^^^^ parsing link destination\n\n\n start = pos;\n res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax);\n\n if (res.ok) {\n href = state.md.normalizeLink(res.str);\n\n if (state.md.validateLink(href)) {\n pos = res.pos;\n } else {\n href = '';\n }\n } // [link]( \"title\" )\n // ^^ skipping these spaces\n\n\n start = pos;\n\n for (; pos < max; pos++) {\n code = state.src.charCodeAt(pos);\n\n if (!isSpace(code) && code !== 0x0A) {\n break;\n }\n } // [link]( \"title\" )\n // ^^^^^^^ parsing link title\n\n\n res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax);\n\n if (pos < max && start !== pos && res.ok) {\n title = res.str;\n pos = res.pos; // [link]( \"title\" )\n // ^^ skipping these spaces\n\n for (; pos < max; pos++) {\n code = state.src.charCodeAt(pos);\n\n if (!isSpace(code) && code !== 0x0A) {\n break;\n }\n }\n } else {\n title = '';\n }\n\n if (pos >= max || state.src.charCodeAt(pos) !== 0x29\n /* ) */\n ) {\n state.pos = oldPos;\n return false;\n }\n\n pos++;\n } else {\n //\n // Link reference\n //\n if (typeof state.env.references === 'undefined') {\n return false;\n }\n\n if (pos < max && state.src.charCodeAt(pos) === 0x5B\n /* [ */\n ) {\n start = pos + 1;\n pos = state.md.helpers.parseLinkLabel(state, pos);\n\n if (pos >= 0) {\n label = state.src.slice(start, pos++);\n } else {\n pos = labelEnd + 1;\n }\n } else {\n pos = labelEnd + 1;\n } // covers label === '' and label === undefined\n // (collapsed reference link and shortcut reference link respectively)\n\n\n if (!label) {\n label = state.src.slice(labelStart, labelEnd);\n }\n\n ref = state.env.references[normalizeReference(label)];\n\n if (!ref) {\n state.pos = oldPos;\n return false;\n }\n\n href = ref.href;\n title = ref.title;\n } //\n // We found the end of the link, and know for a fact it's a valid link;\n // so all that's left to do is to call tokenizer.\n //\n\n\n if (!silent) {\n content = state.src.slice(labelStart, 
labelEnd);\n state.md.inline.parse(content, state.md, state.env, tokens = []);\n token = state.push('image', 'img', 0);\n token.attrs = attrs = [['src', href], ['alt', '']];\n token.children = tokens;\n token.content = content;\n\n if (title) {\n attrs.push(['title', title]);\n }\n }\n\n state.pos = pos;\n state.posMax = max;\n return true;\n};","// Process autolinks ''\n'use strict';\n/*eslint max-len:0*/\n\nvar EMAIL_RE = /^([a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$/;\nvar AUTOLINK_RE = /^([a-zA-Z][a-zA-Z0-9+.\\-]{1,31}):([^<>\\x00-\\x20]*)$/;\n\nmodule.exports = function autolink(state, silent) {\n var url,\n fullUrl,\n token,\n ch,\n start,\n max,\n pos = state.pos;\n\n if (state.src.charCodeAt(pos) !== 0x3C\n /* < */\n ) {\n return false;\n }\n\n start = state.pos;\n max = state.posMax;\n\n for (;;) {\n if (++pos >= max) return false;\n ch = state.src.charCodeAt(pos);\n if (ch === 0x3C\n /* < */\n ) return false;\n if (ch === 0x3E\n /* > */\n ) break;\n }\n\n url = state.src.slice(start + 1, pos);\n\n if (AUTOLINK_RE.test(url)) {\n fullUrl = state.md.normalizeLink(url);\n\n if (!state.md.validateLink(fullUrl)) {\n return false;\n }\n\n if (!silent) {\n token = state.push('link_open', 'a', 1);\n token.attrs = [['href', fullUrl]];\n token.markup = 'autolink';\n token.info = 'auto';\n token = state.push('text', '', 0);\n token.content = state.md.normalizeLinkText(url);\n token = state.push('link_close', 'a', -1);\n token.markup = 'autolink';\n token.info = 'auto';\n }\n\n state.pos += url.length + 2;\n return true;\n }\n\n if (EMAIL_RE.test(url)) {\n fullUrl = state.md.normalizeLink('mailto:' + url);\n\n if (!state.md.validateLink(fullUrl)) {\n return false;\n }\n\n if (!silent) {\n token = state.push('link_open', 'a', 1);\n token.attrs = [['href', fullUrl]];\n token.markup = 'autolink';\n token.info = 'auto';\n token = state.push('text', '', 0);\n token.content = state.md.normalizeLinkText(url);\n token = state.push('link_close', 'a', -1);\n token.markup = 'autolink';\n token.info = 'auto';\n }\n\n state.pos += url.length + 2;\n return true;\n }\n\n return false;\n};","// Process html tags\n'use strict';\n\nvar HTML_TAG_RE = require('../common/html_re').HTML_TAG_RE;\n\nfunction isLinkOpen(str) {\n return /^\\s]/i.test(str);\n}\n\nfunction isLinkClose(str) {\n return /^<\\/a\\s*>/i.test(str);\n}\n\nfunction isLetter(ch) {\n /*eslint no-bitwise:0*/\n var lc = ch | 0x20; // to lower case\n\n return lc >= 0x61\n /* a */\n && lc <= 0x7a\n /* z */\n ;\n}\n\nmodule.exports = function html_inline(state, silent) {\n var ch,\n match,\n max,\n token,\n pos = state.pos;\n\n if (!state.md.options.html) {\n return false;\n } // Check start\n\n\n max = state.posMax;\n\n if (state.src.charCodeAt(pos) !== 0x3C\n /* < */\n || pos + 2 >= max) {\n return false;\n } // Quick fail on second char\n\n\n ch = state.src.charCodeAt(pos + 1);\n\n if (ch !== 0x21\n /* ! */\n && ch !== 0x3F\n /* ? 
*/\n && ch !== 0x2F\n /* / */\n && !isLetter(ch)) {\n return false;\n }\n\n match = state.src.slice(pos).match(HTML_TAG_RE);\n\n if (!match) {\n return false;\n }\n\n if (!silent) {\n token = state.push('html_inline', '', 0);\n token.content = match[0];\n if (isLinkOpen(token.content)) state.linkLevel++;\n if (isLinkClose(token.content)) state.linkLevel--;\n }\n\n state.pos += match[0].length;\n return true;\n};","// Process html entity - {, ¯, ", ...\n'use strict';\n\nvar entities = require('../common/entities');\n\nvar has = require('../common/utils').has;\n\nvar isValidEntityCode = require('../common/utils').isValidEntityCode;\n\nvar fromCodePoint = require('../common/utils').fromCodePoint;\n\nvar DIGITAL_RE = /^((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i;\nvar NAMED_RE = /^&([a-z][a-z0-9]{1,31});/i;\n\nmodule.exports = function entity(state, silent) {\n var ch,\n code,\n match,\n token,\n pos = state.pos,\n max = state.posMax;\n if (state.src.charCodeAt(pos) !== 0x26\n /* & */\n ) return false;\n if (pos + 1 >= max) return false;\n ch = state.src.charCodeAt(pos + 1);\n\n if (ch === 0x23\n /* # */\n ) {\n match = state.src.slice(pos).match(DIGITAL_RE);\n\n if (match) {\n if (!silent) {\n code = match[1][0].toLowerCase() === 'x' ? parseInt(match[1].slice(1), 16) : parseInt(match[1], 10);\n token = state.push('text_special', '', 0);\n token.content = isValidEntityCode(code) ? fromCodePoint(code) : fromCodePoint(0xFFFD);\n token.markup = match[0];\n token.info = 'entity';\n }\n\n state.pos += match[0].length;\n return true;\n }\n } else {\n match = state.src.slice(pos).match(NAMED_RE);\n\n if (match) {\n if (has(entities, match[1])) {\n if (!silent) {\n token = state.push('text_special', '', 0);\n token.content = entities[match[1]];\n token.markup = match[0];\n token.info = 'entity';\n }\n\n state.pos += match[0].length;\n return true;\n }\n }\n }\n\n return false;\n};","// For each opening emphasis-like marker find a matching closing one\n//\n'use strict';\n\nfunction processDelimiters(delimiters) {\n var closerIdx,\n openerIdx,\n closer,\n opener,\n minOpenerIdx,\n newMinOpenerIdx,\n isOddMatch,\n lastJump,\n openersBottom = {},\n max = delimiters.length;\n if (!max) return; // headerIdx is the first delimiter of the current (where closer is) delimiter run\n\n var headerIdx = 0;\n var lastTokenIdx = -2; // needs any value lower than -1\n\n var jumps = [];\n\n for (closerIdx = 0; closerIdx < max; closerIdx++) {\n closer = delimiters[closerIdx];\n jumps.push(0); // markers belong to same delimiter run if:\n // - they have adjacent tokens\n // - AND markers are the same\n //\n\n if (delimiters[headerIdx].marker !== closer.marker || lastTokenIdx !== closer.token - 1) {\n headerIdx = closerIdx;\n }\n\n lastTokenIdx = closer.token; // Length is only used for emphasis-specific \"rule of 3\",\n // if it's not defined (in strikethrough or 3rd party plugins),\n // we can default it to 0 to disable those checks.\n //\n\n closer.length = closer.length || 0;\n if (!closer.close) continue; // Previously calculated lower bounds (previous fails)\n // for each marker, each delimiter length modulo 3,\n // and for whether this closer can be an opener;\n // https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460\n\n if (!openersBottom.hasOwnProperty(closer.marker)) {\n openersBottom[closer.marker] = [-1, -1, -1, -1, -1, -1];\n }\n\n minOpenerIdx = openersBottom[closer.marker][(closer.open ? 
3 : 0) + closer.length % 3];\n openerIdx = headerIdx - jumps[headerIdx] - 1;\n newMinOpenerIdx = openerIdx;\n\n for (; openerIdx > minOpenerIdx; openerIdx -= jumps[openerIdx] + 1) {\n opener = delimiters[openerIdx];\n if (opener.marker !== closer.marker) continue;\n\n if (opener.open && opener.end < 0) {\n isOddMatch = false; // from spec:\n //\n // If one of the delimiters can both open and close emphasis, then the\n // sum of the lengths of the delimiter runs containing the opening and\n // closing delimiters must not be a multiple of 3 unless both lengths\n // are multiples of 3.\n //\n\n if (opener.close || closer.open) {\n if ((opener.length + closer.length) % 3 === 0) {\n if (opener.length % 3 !== 0 || closer.length % 3 !== 0) {\n isOddMatch = true;\n }\n }\n }\n\n if (!isOddMatch) {\n // If previous delimiter cannot be an opener, we can safely skip\n // the entire sequence in future checks. This is required to make\n // sure algorithm has linear complexity (see *_*_*_*_*_... case).\n //\n lastJump = openerIdx > 0 && !delimiters[openerIdx - 1].open ? jumps[openerIdx - 1] + 1 : 0;\n jumps[closerIdx] = closerIdx - openerIdx + lastJump;\n jumps[openerIdx] = lastJump;\n closer.open = false;\n opener.end = closerIdx;\n opener.close = false;\n newMinOpenerIdx = -1; // treat next token as start of run,\n // it optimizes skips in **<...>**a**<...>** pathological case\n\n lastTokenIdx = -2;\n break;\n }\n }\n }\n\n if (newMinOpenerIdx !== -1) {\n // If match for this delimiter run failed, we want to set lower bound for\n // future lookups. This is required to make sure algorithm has linear\n // complexity.\n //\n // See details here:\n // https://github.com/commonmark/cmark/issues/178#issuecomment-270417442\n //\n openersBottom[closer.marker][(closer.open ? 3 : 0) + (closer.length || 0) % 3] = newMinOpenerIdx;\n }\n }\n}\n\nmodule.exports = function link_pairs(state) {\n var curr,\n tokens_meta = state.tokens_meta,\n max = state.tokens_meta.length;\n processDelimiters(state.delimiters);\n\n for (curr = 0; curr < max; curr++) {\n if (tokens_meta[curr] && tokens_meta[curr].delimiters) {\n processDelimiters(tokens_meta[curr].delimiters);\n }\n }\n};","// Clean up tokens after emphasis and strikethrough postprocessing:\n// merge adjacent text nodes into one and re-calculate all token levels\n//\n// This is necessary because initially emphasis delimiter markers (*, _, ~)\n// are treated as their own separate text tokens. 
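For example
// (an illustrative note, not in the original comment), '**x' is first tokenized
// as text('**') followed by text('x').
// 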
Then emphasis rule either\n// leaves them as text (needed to merge with adjacent text) or turns them\n// into opening/closing tags (which messes up levels inside).\n//\n'use strict';\n\nmodule.exports = function fragments_join(state) {\n var curr,\n last,\n level = 0,\n tokens = state.tokens,\n max = state.tokens.length;\n\n for (curr = last = 0; curr < max; curr++) {\n // re-calculate levels after emphasis/strikethrough turns some text nodes\n // into opening/closing tags\n if (tokens[curr].nesting < 0) level--; // closing tag\n\n tokens[curr].level = level;\n if (tokens[curr].nesting > 0) level++; // opening tag\n\n if (tokens[curr].type === 'text' && curr + 1 < max && tokens[curr + 1].type === 'text') {\n // collapse two adjacent text nodes\n tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content;\n } else {\n if (curr !== last) {\n tokens[last] = tokens[curr];\n }\n\n last++;\n }\n }\n\n if (curr !== last) {\n tokens.length = last;\n }\n};","// Inline parser state\n'use strict';\n\nvar Token = require('../token');\n\nvar isWhiteSpace = require('../common/utils').isWhiteSpace;\n\nvar isPunctChar = require('../common/utils').isPunctChar;\n\nvar isMdAsciiPunct = require('../common/utils').isMdAsciiPunct;\n\nfunction StateInline(src, md, env, outTokens) {\n this.src = src;\n this.env = env;\n this.md = md;\n this.tokens = outTokens;\n this.tokens_meta = Array(outTokens.length);\n this.pos = 0;\n this.posMax = this.src.length;\n this.level = 0;\n this.pending = '';\n this.pendingLevel = 0; // Stores { start: end } pairs. Useful for backtrack\n // optimization of pairs parse (emphasis, strikes).\n\n this.cache = {}; // List of emphasis-like delimiters for current tag\n\n this.delimiters = []; // Stack of delimiter lists for upper level tags\n\n this._prev_delimiters = []; // backtick length => last seen position\n\n this.backticks = {};\n this.backticksScanned = false; // Counter used to disable inline linkify-it execution\n // inside and markdown links\n\n this.linkLevel = 0;\n} // Flush pending text\n//\n\n\nStateInline.prototype.pushPending = function () {\n var token = new Token('text', '', 0);\n token.content = this.pending;\n token.level = this.pendingLevel;\n this.tokens.push(token);\n this.pending = '';\n return token;\n}; // Push new token to \"stream\".\n// If pending text exists - flush it as text token\n//\n\n\nStateInline.prototype.push = function (type, tag, nesting) {\n if (this.pending) {\n this.pushPending();\n }\n\n var token = new Token(type, tag, nesting);\n var token_meta = null;\n\n if (nesting < 0) {\n // closing tag\n this.level--;\n this.delimiters = this._prev_delimiters.pop();\n }\n\n token.level = this.level;\n\n if (nesting > 0) {\n // opening tag\n this.level++;\n\n this._prev_delimiters.push(this.delimiters);\n\n this.delimiters = [];\n token_meta = {\n delimiters: this.delimiters\n };\n }\n\n this.pendingLevel = this.level;\n this.tokens.push(token);\n this.tokens_meta.push(token_meta);\n return token;\n}; // Scan a sequence of emphasis-like markers, and determine whether\n// it can start an emphasis sequence or end an emphasis sequence.\n//\n// - start - position to scan from (it should point at a valid marker);\n// - canSplitWord - determine if these markers can be found inside a word\n//\n\n\nStateInline.prototype.scanDelims = function (start, canSplitWord) {\n var pos = start,\n lastChar,\n nextChar,\n count,\n can_open,\n can_close,\n isLastWhiteSpace,\n isLastPunctChar,\n isNextWhiteSpace,\n isNextPunctChar,\n left_flanking = true,\n 
right_flanking = true,\n max = this.posMax,\n marker = this.src.charCodeAt(start); // treat beginning of the line as a whitespace\n\n lastChar = start > 0 ? this.src.charCodeAt(start - 1) : 0x20;\n\n while (pos < max && this.src.charCodeAt(pos) === marker) {\n pos++;\n }\n\n count = pos - start; // treat end of the line as a whitespace\n\n nextChar = pos < max ? this.src.charCodeAt(pos) : 0x20;\n isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));\n isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));\n isLastWhiteSpace = isWhiteSpace(lastChar);\n isNextWhiteSpace = isWhiteSpace(nextChar);\n\n if (isNextWhiteSpace) {\n left_flanking = false;\n } else if (isNextPunctChar) {\n if (!(isLastWhiteSpace || isLastPunctChar)) {\n left_flanking = false;\n }\n }\n\n if (isLastWhiteSpace) {\n right_flanking = false;\n } else if (isLastPunctChar) {\n if (!(isNextWhiteSpace || isNextPunctChar)) {\n right_flanking = false;\n }\n }\n\n if (!canSplitWord) {\n can_open = left_flanking && (!right_flanking || isLastPunctChar);\n can_close = right_flanking && (!left_flanking || isNextPunctChar);\n } else {\n can_open = left_flanking;\n can_close = right_flanking;\n }\n\n return {\n can_open: can_open,\n can_close: can_close,\n length: count\n };\n}; // re-export Token class to use in block rules\n\n\nStateInline.prototype.Token = Token;\nmodule.exports = StateInline;","// markdown-it default options\n'use strict';\n\nmodule.exports = {\n options: {\n html: false,\n // Enable HTML tags in source\n xhtmlOut: false,\n // Use '/' to close single tags (
<br />)\n breaks: false,\n // Convert '\\n' in paragraphs into <br>
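\n // Illustrative only (not part of the original preset): an instance created with\n // require('markdown-it')({ breaks: true }) turns every '\\n' inside a paragraph\n // into a <br> tag, while the defaults above keep it as a soft line break.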
\n langPrefix: 'language-',\n // CSS language prefix for fenced blocks\n linkify: false,\n // autoconvert URL-like texts to links\n // Enable some language-neutral replacements + quotes beautification\n typographer: false,\n // Double + single quotes replacement pairs, when typographer enabled,\n // and smartquotes on. Could be either a String or an Array.\n //\n // For example, you can use '«»„“' for Russian, '„“‚‘' for German,\n // and ['«\\xA0', '\\xA0»', '‹\\xA0', '\\xA0›'] for French (including nbsp).\n quotes: \"\\u201C\\u201D\\u2018\\u2019\",\n\n /* “”‘’ */\n // Highlighter function. Should return escaped HTML,\n // or '' if the source string is not changed and should be escaped externally.\n // If result starts with <pre... internal wrapper is skipped.\n breaks: false,\n // Convert '\\n' in paragraphs into <br>
\n langPrefix: 'language-',\n // CSS language prefix for fenced blocks\n linkify: false,\n // autoconvert URL-like texts to links\n // Enable some language-neutral replacements + quotes beautification\n typographer: false,\n // Double + single quotes replacement pairs, when typographer enabled,\n // and smartquotes on. Could be either a String or an Array.\n //\n // For example, you can use '«»„“' for Russian, '„“‚‘' for German,\n // and ['«\\xA0', '\\xA0»', '‹\\xA0', '\\xA0›'] for French (including nbsp).\n quotes: \"\\u201C\\u201D\\u2018\\u2019\",\n\n /* “”‘’ */\n // Highlighter function. Should return escaped HTML,\n // or '' if the source string is not changed and should be escaped externally.\n // If result starts with <pre... internal wrapper is skipped.\n breaks: false,\n // Convert '\\n' in paragraphs into <br>
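\n // Illustrative only (not part of the original preset): with { typographer: true },\n // renderInline('wow -- nice...') becomes 'wow – nice…', and straight quotes are\n // replaced using the configured quotes pairs.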
\n langPrefix: 'language-',\n // CSS language prefix for fenced blocks\n linkify: false,\n // autoconvert URL-like texts to links\n // Enable some language-neutral replacements + quotes beautification\n typographer: false,\n // Double + single quotes replacement pairs, when typographer enabled,\n // and smartquotes on. Could be either a String or an Array.\n //\n // For example, you can use '«»„“' for Russian, '„“‚‘' for German,\n // and ['«\\xA0', '\\xA0»', '‹\\xA0', '\\xA0›'] for French (including nbsp).\n quotes: \"\\u201C\\u201D\\u2018\\u2019\",\n\n /* “”‘’ */\n // Highlighter function. Should return escaped HTML,\n // or '' if the source string is not changed and should be escaped externaly.\n // If result starts with index; index++) if (NO_HOLES || index in self) {\n value = self[index];\n result = boundFunction(value, index, O);\n if (TYPE) {\n if (IS_MAP) target[index] = result; // map\n else if (result) switch (TYPE) {\n case 3: return true; // some\n case 5: return value; // find\n case 6: return index; // findIndex\n case 2: push.call(target, value); // filter\n } else switch (TYPE) {\n case 4: return false; // every\n case 7: push.call(target, value); // filterOut\n }\n }\n }\n return IS_FIND_INDEX ? -1 : IS_SOME || IS_EVERY ? IS_EVERY : target;\n };\n};\n\nmodule.exports = {\n // `Array.prototype.forEach` method\n // https://tc39.es/ecma262/#sec-array.prototype.foreach\n forEach: createMethod(0),\n // `Array.prototype.map` method\n // https://tc39.es/ecma262/#sec-array.prototype.map\n map: createMethod(1),\n // `Array.prototype.filter` method\n // https://tc39.es/ecma262/#sec-array.prototype.filter\n filter: createMethod(2),\n // `Array.prototype.some` method\n // https://tc39.es/ecma262/#sec-array.prototype.some\n some: createMethod(3),\n // `Array.prototype.every` method\n // https://tc39.es/ecma262/#sec-array.prototype.every\n every: createMethod(4),\n // `Array.prototype.find` method\n // https://tc39.es/ecma262/#sec-array.prototype.find\n find: createMethod(5),\n // `Array.prototype.findIndex` method\n // https://tc39.es/ecma262/#sec-array.prototype.findIndex\n findIndex: createMethod(6),\n // `Array.prototype.filterOut` method\n // https://github.com/tc39/proposal-array-filtering\n filterOut: createMethod(7)\n};\n","var isObject = require('../internals/is-object');\n\n// `ToPrimitive` abstract operation\n// https://tc39.es/ecma262/#sec-toprimitive\n// instead of the ES6 spec version, we didn't implement @@toPrimitive case\n// and the second argument - flag - preferred type is a string\nmodule.exports = function (input, PREFERRED_STRING) {\n if (!isObject(input)) return input;\n var fn, val;\n if (PREFERRED_STRING && typeof (fn = input.toString) == 'function' && !isObject(val = fn.call(input))) return val;\n if (typeof (fn = input.valueOf) == 'function' && !isObject(val = fn.call(input))) return val;\n if (!PREFERRED_STRING && typeof (fn = input.toString) == 'function' && !isObject(val = fn.call(input))) return val;\n throw TypeError(\"Can't convert object to primitive value\");\n};\n","var anObject = require('../internals/an-object');\nvar defineProperties = require('../internals/object-define-properties');\nvar enumBugKeys = require('../internals/enum-bug-keys');\nvar hiddenKeys = require('../internals/hidden-keys');\nvar html = require('../internals/html');\nvar documentCreateElement = require('../internals/document-create-element');\nvar sharedKey = require('../internals/shared-key');\n\nvar GT = '>';\nvar LT = '<';\nvar PROTOTYPE = 'prototype';\nvar SCRIPT = 
'script';\nvar IE_PROTO = sharedKey('IE_PROTO');\n\nvar EmptyConstructor = function () { /* empty */ };\n\nvar scriptTag = function (content) {\n return LT + SCRIPT + GT + content + LT + '/' + SCRIPT + GT;\n};\n\n// Create object with fake `null` prototype: use ActiveX Object with cleared prototype\nvar NullProtoObjectViaActiveX = function (activeXDocument) {\n activeXDocument.write(scriptTag(''));\n activeXDocument.close();\n var temp = activeXDocument.parentWindow.Object;\n activeXDocument = null; // avoid memory leak\n return temp;\n};\n\n// Create object with fake `null` prototype: use iframe Object with cleared prototype\nvar NullProtoObjectViaIFrame = function () {\n // Thrash, waste and sodomy: IE GC bug\n var iframe = documentCreateElement('iframe');\n var JS = 'java' + SCRIPT + ':';\n var iframeDocument;\n iframe.style.display = 'none';\n html.appendChild(iframe);\n // https://github.com/zloirock/core-js/issues/475\n iframe.src = String(JS);\n iframeDocument = iframe.contentWindow.document;\n iframeDocument.open();\n iframeDocument.write(scriptTag('document.F=Object'));\n iframeDocument.close();\n return iframeDocument.F;\n};\n\n// Check for document.domain and active x support\n// No need to use active x approach when document.domain is not set\n// see https://github.com/es-shims/es5-shim/issues/150\n// variation of https://github.com/kitcambridge/es5-shim/commit/4f738ac066346\n// avoid IE GC bug\nvar activeXDocument;\nvar NullProtoObject = function () {\n try {\n /* global ActiveXObject -- old IE */\n activeXDocument = document.domain && new ActiveXObject('htmlfile');\n } catch (error) { /* ignore */ }\n NullProtoObject = activeXDocument ? NullProtoObjectViaActiveX(activeXDocument) : NullProtoObjectViaIFrame();\n var length = enumBugKeys.length;\n while (length--) delete NullProtoObject[PROTOTYPE][enumBugKeys[length]];\n return NullProtoObject();\n};\n\nhiddenKeys[IE_PROTO] = true;\n\n// `Object.create` method\n// https://tc39.es/ecma262/#sec-object.create\nmodule.exports = Object.create || function create(O, Properties) {\n var result;\n if (O !== null) {\n EmptyConstructor[PROTOTYPE] = anObject(O);\n result = new EmptyConstructor();\n EmptyConstructor[PROTOTYPE] = null;\n // add \"__proto__\" for Object.getPrototypeOf polyfill\n result[IE_PROTO] = O;\n } else result = NullProtoObject();\n return Properties === undefined ? 
result : defineProperties(result, Properties);\n};\n","// Process [@mention](mention://user/1/Pranav)\nconst USER_MENTIONS_REGEX = /mention:\\/\\/(user|team)\\/(\\d+)\\/(.+)/gm;\n\nconst buildMentionTokens = () => (state, silent) => {\n var label;\n var labelEnd;\n var labelStart;\n var pos;\n var res;\n var token;\n var href = '';\n var max = state.posMax;\n\n if (state.src.charCodeAt(state.pos) !== 0x5b /* [ */) {\n return false;\n }\n\n labelStart = state.pos + 1;\n labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, true);\n\n // parser failed to find ']', so it's not a valid link\n if (labelEnd < 0) {\n return false;\n }\n\n label = state.src.slice(labelStart, labelEnd);\n pos = labelEnd + 1;\n\n if (pos < max && state.src.charCodeAt(pos) === 0x28 /* ( */) {\n pos += 1;\n res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax);\n if (res.ok) {\n href = state.md.normalizeLink(res.str);\n if (state.md.validateLink(href)) {\n pos = res.pos;\n } else {\n href = '';\n }\n }\n pos += 1;\n }\n\n if (!href.match(new RegExp(USER_MENTIONS_REGEX))) {\n return false;\n }\n\n if (!silent) {\n state.pos = labelStart;\n state.posMax = labelEnd;\n\n token = state.push('mention', '');\n token.href = href;\n token.content = label;\n }\n\n state.pos = pos;\n state.posMax = max;\n\n return true;\n};\n\nconst renderMentions = () => (tokens, idx) => {\n return `${tokens[idx].content}`;\n};\n\nexport default function mentionPlugin(md) {\n md.renderer.rules.mention = renderMentions(md);\n md.inline.ruler.before('link', 'mention', buildMentionTokens(md));\n}\n","import mila from 'markdown-it-link-attributes';\nimport mentionPlugin from './markdownIt/link';\n\nconst setImageHeight = inlineToken => {\n const imgSrc = inlineToken.attrGet('src');\n if (!imgSrc) return;\n const url = new URL(imgSrc);\n const height = url.searchParams.get('cw_image_height');\n if (!height) return;\n inlineToken.attrSet('style', `height: ${height};`);\n};\n\nconst processInlineToken = blockToken => {\n blockToken.children.forEach(inlineToken => {\n if (inlineToken.type === 'image') {\n setImageHeight(inlineToken);\n }\n });\n};\n\nconst imgResizeManager = md => {\n // Custom rule for image resize in markdown\n // If the image url has a query param cw_image_height, then add a style attribute to the image\n md.core.ruler.after('inline', 'add-image-height', state => {\n state.tokens.forEach(blockToken => {\n if (blockToken.type === 'inline') {\n processInlineToken(blockToken);\n }\n });\n });\n};\n\nconst md = require('markdown-it')({\n html: false,\n xhtmlOut: true,\n breaks: true,\n langPrefix: 'language-',\n linkify: true,\n typographer: true,\n quotes: '\\u201c\\u201d\\u2018\\u2019',\n maxNesting: 20,\n})\n .use(mentionPlugin)\n .use(imgResizeManager)\n .use(mila, {\n attrs: {\n class: 'link',\n rel: 'noreferrer noopener nofollow',\n target: '_blank',\n },\n });\n\nconst TWITTER_USERNAME_REGEX = /(^|[^@\\w])@(\\w{1,15})\\b/g;\nconst TWITTER_USERNAME_REPLACEMENT = '$1[@$2](http://twitter.com/$2)';\nconst TWITTER_HASH_REGEX = /(^|\\s)#(\\w+)/g;\nconst TWITTER_HASH_REPLACEMENT = '$1[#$2](https://twitter.com/hashtag/$2)';\n\nclass MessageFormatter {\n constructor(message, isATweet = false, isAPrivateNote = false) {\n this.message = message || '';\n this.isAPrivateNote = isAPrivateNote;\n this.isATweet = isATweet;\n }\n\n formatMessage() {\n let updatedMessage = this.message;\n if (this.isATweet && !this.isAPrivateNote) {\n updatedMessage = updatedMessage.replace(\n TWITTER_USERNAME_REGEX,\n 
TWITTER_USERNAME_REPLACEMENT\n );\n updatedMessage = updatedMessage.replace(\n TWITTER_HASH_REGEX,\n TWITTER_HASH_REPLACEMENT\n );\n }\n return md.render(updatedMessage);\n }\n\n get formattedMessage() {\n return this.formatMessage();\n }\n\n get plainText() {\n const strippedOutHtml = new DOMParser().parseFromString(\n this.formattedMessage,\n 'text/html'\n );\n return strippedOutHtml.body.textContent || '';\n }\n}\n\nexport default MessageFormatter;\n","import MessageFormatter from '../helpers/MessageFormatter';\n\n/**\n * A composable providing utility functions for message formatting.\n *\n * @returns {Object} A set of functions for message formatting.\n */\nexport const useMessageFormatter = () => {\n /**\n * Formats a message based on specified conditions.\n *\n * @param {string} message - The message to be formatted.\n * @param {boolean} isATweet - Whether the message is a tweet.\n * @param {boolean} isAPrivateNote - Whether the message is a private note.\n * @returns {string} - The formatted message.\n */\n const formatMessage = (message, isATweet, isAPrivateNote) => {\n const messageFormatter = new MessageFormatter(\n message,\n isATweet,\n isAPrivateNote\n );\n return messageFormatter.formattedMessage;\n };\n\n /**\n * Converts a message to plain text.\n *\n * @param {string} message - The message to be converted.\n * @param {boolean} isATweet - Whether the message is a tweet.\n * @returns {string} - The plain text message.\n */\n const getPlainText = (message, isATweet) => {\n const messageFormatter = new MessageFormatter(message, isATweet);\n return messageFormatter.plainText;\n };\n\n /**\n * Truncates a description to a maximum length of 100 characters.\n *\n * @param {string} [description=''] - The description to be truncated.\n * @returns {string} - The truncated description.\n */\n const truncateMessage = (description = '') => {\n if (description.length < 100) {\n return description;\n }\n\n return `${description.slice(0, 97)}...`;\n };\n\n /**\n * Highlights occurrences of a search term within given content.\n *\n * @param {string} [content=''] - The content in which to search.\n * @param {string} [searchTerm=''] - The term to search for.\n * @param {string} [highlightClass=''] - The CSS class to apply to the highlighted term.\n * @returns {string} - The content with highlighted terms.\n */\n const highlightContent = (\n content = '',\n searchTerm = '',\n highlightClass = ''\n ) => {\n const plainTextContent = getPlainText(content);\n\n // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping\n const escapedSearchTerm = searchTerm.replace(/[.*+?^${}()|[\\]\\\\]/g, '\\\\$&');\n\n return plainTextContent.replace(\n new RegExp(`(${escapedSearchTerm})`, 'ig'),\n `$1`\n );\n };\n\n return {\n formatMessage,\n getPlainText,\n truncateMessage,\n highlightContent,\n };\n};\n","\"use strict\"; // Adapted from https://github.com/markdown-it/markdown-it/blob/fbc6b0fed563ba7c00557ab638fd19752f8e759d/docs/architecture.md\n\nfunction findFirstMatchingConfig(link, configs) {\n var i, config;\n var href = link.attrs[link.attrIndex(\"href\")][1];\n\n for (i = 0; i < configs.length; ++i) {\n config = configs[i]; // If there is a matcher function defined then call it\n // Matcher Function should return a boolean indicating\n // whether or not it matched. 
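A config such as
 // { matcher: (href) => href.startsWith('https://twitter.com'), attrs: { rel: 'nofollow' } }
 // is applied only to hrefs its matcher accepts (illustrative values, not from this bundle).
 // 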
If it matched, use that\n // configuration, otherwise, try the next one.\n\n if (typeof config.matcher === \"function\") {\n if (config.matcher(href, config)) {\n return config;\n } else {\n continue;\n }\n }\n\n return config;\n }\n}\n\nfunction applyAttributes(idx, tokens, attributes) {\n Object.keys(attributes).forEach(function (attr) {\n var attrIndex;\n var value = attributes[attr];\n\n if (attr === \"className\") {\n // when dealing with applying classes\n // programatically, some programmers\n // may prefer to use the className syntax\n attr = \"class\";\n }\n\n attrIndex = tokens[idx].attrIndex(attr);\n\n if (attrIndex < 0) {\n // attr doesn't exist, add new attribute\n tokens[idx].attrPush([attr, value]);\n } else {\n // attr already exists, overwrite it\n tokens[idx].attrs[attrIndex][1] = value; // replace value of existing attr\n }\n });\n}\n\nfunction markdownitLinkAttributes(md, configs) {\n if (!configs) {\n configs = [];\n } else {\n configs = Array.isArray(configs) ? configs : [configs];\n }\n\n Object.freeze(configs);\n var defaultRender = md.renderer.rules.link_open || this.defaultRender;\n\n md.renderer.rules.link_open = function (tokens, idx, options, env, self) {\n var config = findFirstMatchingConfig(tokens[idx], configs);\n var attributes = config && config.attrs;\n\n if (attributes) {\n applyAttributes(idx, tokens, attributes);\n } // pass token to default renderer.\n\n\n return defaultRender(tokens, idx, options, env, self);\n };\n}\n\nmarkdownitLinkAttributes.defaultRender = function (tokens, idx, options, env, self) {\n return self.renderToken(tokens, idx, options);\n};\n\nmodule.exports = markdownitLinkAttributes;","module.exports = {};\n","var TO_STRING_TAG_SUPPORT = require('../internals/to-string-tag-support');\nvar redefine = require('../internals/redefine');\nvar toString = require('../internals/object-to-string');\n\n// `Object.prototype.toString` method\n// https://tc39.es/ecma262/#sec-object.prototype.tostring\nif (!TO_STRING_TAG_SUPPORT) {\n redefine(Object.prototype, 'toString', toString, { unsafe: true });\n}\n","'use strict';\nvar $ = require('../internals/export');\nvar notARegExp = require('../internals/not-a-regexp');\nvar requireObjectCoercible = require('../internals/require-object-coercible');\nvar correctIsRegExpLogic = require('../internals/correct-is-regexp-logic');\n\n// `String.prototype.includes` method\n// https://tc39.es/ecma262/#sec-string.prototype.includes\n$({ target: 'String', proto: true, forced: !correctIsRegExpLogic('includes') }, {\n includes: function includes(searchString /* , position = 0 */) {\n return !!~String(requireObjectCoercible(this))\n .indexOf(notARegExp(searchString), arguments.length > 1 ? arguments[1] : undefined);\n }\n});\n","'use strict';\n\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return _typeof(obj); }\n\nimport utils from '../utils.js';\nimport AxiosError from '../core/AxiosError.js'; // temporary hotfix to avoid circular references until AxiosURLSearchParams is refactored\n\nimport PlatformFormData from '../platform/node/classes/FormData.js';\n/**\n * Determines if the given thing is a array or js object.\n *\n * @param {string} thing - The object or array to be visited.\n *\n * @returns {boolean}\n */\n\nfunction isVisitable(thing) {\n return utils.isPlainObject(thing) || utils.isArray(thing);\n}\n/**\n * It removes the brackets from the end of a string\n *\n * @param {string} key - The key of the parameter.\n *\n * @returns {string} the key without the brackets.\n */\n\n\nfunction removeBrackets(key) {\n return utils.endsWith(key, '[]') ? key.slice(0, -2) : key;\n}\n/**\n * It takes a path, a key, and a boolean, and returns a string\n *\n * @param {string} path - The path to the current key.\n * @param {string} key - The key of the current object being iterated over.\n * @param {string} dots - If true, the key will be rendered with dots instead of brackets.\n *\n * @returns {string} The path to the current key.\n */\n\n\nfunction renderKey(path, key, dots) {\n if (!path) return key;\n return path.concat(key).map(function each(token, i) {\n // eslint-disable-next-line no-param-reassign\n token = removeBrackets(token);\n return !dots && i ? '[' + token + ']' : token;\n }).join(dots ? '.' : '');\n}\n/**\n * If the array is an array and none of its elements are visitable, then it's a flat array.\n *\n * @param {Array} arr - The array to check\n *\n * @returns {boolean}\n */\n\n\nfunction isFlatArray(arr) {\n return utils.isArray(arr) && !arr.some(isVisitable);\n}\n\nvar predicates = utils.toFlatObject(utils, {}, null, function filter(prop) {\n return /^is[A-Z]/.test(prop);\n});\n/**\n * Convert a data object to FormData\n *\n * @param {Object} obj\n * @param {?Object} [formData]\n * @param {?Object} [options]\n * @param {Function} [options.visitor]\n * @param {Boolean} [options.metaTokens = true]\n * @param {Boolean} [options.dots = false]\n * @param {?Boolean} [options.indexes = false]\n *\n * @returns {Object}\n **/\n\n/**\n * It converts an object into a FormData object\n *\n * @param {Object} obj - The object to convert to form data.\n * @param {string} formData - The FormData object to append to.\n * @param {Object} options\n *\n * @returns\n */\n\nfunction toFormData(obj, formData, options) {\n if (!utils.isObject(obj)) {\n throw new TypeError('target must be an object');\n } // eslint-disable-next-line no-param-reassign\n\n\n formData = formData || new (PlatformFormData || FormData)(); // eslint-disable-next-line no-param-reassign\n\n options = utils.toFlatObject(options, {\n metaTokens: true,\n dots: false,\n indexes: false\n }, false, function defined(option, source) {\n // eslint-disable-next-line no-eq-null,eqeqeq\n return !utils.isUndefined(source[option]);\n });\n var metaTokens = options.metaTokens; // eslint-disable-next-line no-use-before-define\n\n var visitor = options.visitor || defaultVisitor;\n var dots = options.dots;\n var indexes = options.indexes;\n\n var _Blob = options.Blob || typeof Blob !== 'undefined' && Blob;\n\n var useBlob = _Blob && utils.isSpecCompliantForm(formData);\n\n if (!utils.isFunction(visitor)) {\n throw new TypeError('visitor must be a function');\n }\n\n function convertValue(value) {\n if (value === null) return '';\n\n if (utils.isDate(value)) {\n return value.toISOString();\n }\n\n if 
(!useBlob && utils.isBlob(value)) {\n throw new AxiosError('Blob is not supported. Use a Buffer instead.');\n }\n\n if (utils.isArrayBuffer(value) || utils.isTypedArray(value)) {\n return useBlob && typeof Blob === 'function' ? new Blob([value]) : Buffer.from(value);\n }\n\n return value;\n }\n /**\n * Default visitor.\n *\n * @param {*} value\n * @param {String|Number} key\n * @param {Array} path\n * @this {FormData}\n *\n * @returns {boolean} return true to visit the each prop of the value recursively\n */\n\n\n function defaultVisitor(value, key, path) {\n var arr = value;\n\n if (value && !path && _typeof(value) === 'object') {\n if (utils.endsWith(key, '{}')) {\n // eslint-disable-next-line no-param-reassign\n key = metaTokens ? key : key.slice(0, -2); // eslint-disable-next-line no-param-reassign\n\n value = JSON.stringify(value);\n } else if (utils.isArray(value) && isFlatArray(value) || (utils.isFileList(value) || utils.endsWith(key, '[]')) && (arr = utils.toArray(value))) {\n // eslint-disable-next-line no-param-reassign\n key = removeBrackets(key);\n arr.forEach(function each(el, index) {\n !(utils.isUndefined(el) || el === null) && formData.append( // eslint-disable-next-line no-nested-ternary\n indexes === true ? renderKey([key], index, dots) : indexes === null ? key : key + '[]', convertValue(el));\n });\n return false;\n }\n }\n\n if (isVisitable(value)) {\n return true;\n }\n\n formData.append(renderKey(path, key, dots), convertValue(value));\n return false;\n }\n\n var stack = [];\n var exposedHelpers = Object.assign(predicates, {\n defaultVisitor: defaultVisitor,\n convertValue: convertValue,\n isVisitable: isVisitable\n });\n\n function build(value, path) {\n if (utils.isUndefined(value)) return;\n\n if (stack.indexOf(value) !== -1) {\n throw Error('Circular reference detected in ' + path.join('.'));\n }\n\n stack.push(value);\n utils.forEach(value, function each(el, key) {\n var result = !(utils.isUndefined(el) || el === null) && visitor.call(formData, el, utils.isString(key) ? key.trim() : key, path, exposedHelpers);\n\n if (result === true) {\n build(el, path ? path.concat(key) : [key]);\n }\n });\n stack.pop();\n }\n\n if (!utils.isObject(obj)) {\n throw new TypeError('data must be an object');\n }\n\n build(obj);\n return formData;\n}\n\nexport default toFormData;","var internalObjectKeys = require('../internals/object-keys-internal');\nvar enumBugKeys = require('../internals/enum-bug-keys');\n\nvar hiddenKeys = enumBugKeys.concat('length', 'prototype');\n\n// `Object.getOwnPropertyNames` method\n// https://tc39.es/ecma262/#sec-object.getownpropertynames\n// eslint-disable-next-line es/no-object-getownpropertynames -- safe\nexports.f = Object.getOwnPropertyNames || function getOwnPropertyNames(O) {\n return internalObjectKeys(O, hiddenKeys);\n};\n","module.exports = {};\n","var classof = require('../internals/classof-raw');\nvar global = require('../internals/global');\n\nmodule.exports = classof(global.process) == 'process';\n"],"sourceRoot":""}
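// ---------------------------------------------------------------------------
// Usage sketch (not part of the bundled sources above). It shows how the
// useMessageFormatter composable defined earlier in this bundle is typically
// consumed; the import path and the sample strings are assumptions added for
// illustration only.
import { useMessageFormatter } from './composables/useMessageFormatter'; // hypothetical path

const { formatMessage, getPlainText, truncateMessage } = useMessageFormatter();

// Tweets get @mentions and #hashtags turned into markdown links before the
// markdown-it instance renders the message to HTML.
const html = formatMessage('Hello @chatwoot, loving #opensource', true, false);

// DOMParser strips the rendered HTML back to text (browser environments only),
// and truncateMessage caps the preview at roughly 100 characters.
const preview = truncateMessage(
  getPlainText('**Bold** and [a link](https://example.com)', false)
);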