diff --git a/CNAME b/CNAME
new file mode 100644
index 0000000000..f9e3b7b282
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+xn--rttn50cxwe.xn--xuw71npw9a.ml
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..7c60232f70
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019-2022, VoileLabs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/android-chrome-192x192.png b/android-chrome-192x192.png
new file mode 100644
index 0000000000..b84ee9d431
Binary files /dev/null and b/android-chrome-192x192.png differ
diff --git a/android-chrome-512x512.png b/android-chrome-512x512.png
new file mode 100644
index 0000000000..09bfe518b3
Binary files /dev/null and b/android-chrome-512x512.png differ
diff --git a/api/index.js b/api/index.js
new file mode 100644
index 0000000000..f7047c265a
--- /dev/null
+++ b/api/index.js
@@ -0,0 +1,216 @@
+require('cross-fetch/polyfill')
+
+const path = require('path')
+const fs = require('fs')
+const gql = require('fake-tag')
+const { encode } = require('html-entities')
+const { Readable } = require('stream')
+const { compressStream } = require('../libs/common')
+
+const html = fs.readFileSync(path.join(__dirname, '../index.html')).toString()
+
+/**
+ * @param {import('@vercel/node').VercelRequest} req
+ * @param {import('@vercel/node').VercelResponse} res
+ */
+module.exports = async (req, res) => {
+  try {
+    processRequest(req, res)
+  } catch (e) {
+    res.status(200)
+    res.setHeader('Content-Type', 'text/html; charset=utf-8')
+    res.end(html)
+  }
+}
+
+/**
+ * @param {import('@vercel/node').VercelRequest} req
+ * @param {import('@vercel/node').VercelResponse} res
+ */
+async function processRequest(req, res) {
+  let body = html
+
+  const timeStart = Date.now()
+
+  /* Open Graph */
+  try {
+    if (/\/video\/\w+/.test(req.url)) {
+      const vid = req.url.match(/\/video\/(\w+)/)[1]
+      const data = (
+        await queryGraphQL(
+          gql`
+            query ($vid: String!) {
+              getVideo(para: { vid: $vid, lang: "CHS" }) {
+                item {
+                  coverImage
+                  title
+                  desc
+                }
+                tags {
+                  languages {
+                    value
+                  }
+                }
+              }
+            }
+          `,
+          {
+            vid,
+          }
+        )
+      ).data.getVideo
+      const og = [
+        // common data
+        `<
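The api/index.js hunk above (truncated here) serves the prebuilt SPA shell from a Vercel function and, for /video/<id> URLs, injects Open Graph tags server-side so link previews work without client rendering, falling back to the untouched HTML on any error. Below is a minimal, hypothetical TypeScript sketch of that pattern, not the PR's exact code: fetchVideoMeta stands in for the GraphQL getVideo lookup, and the <head> splice point is assumed for illustration.

// Hypothetical sketch of the Open Graph injection pattern used by api/index.js above.
import type { VercelRequest, VercelResponse } from '@vercel/node'
import { readFileSync } from 'fs'
import { join } from 'path'
import { encode } from 'html-entities'

// Read the static shell once per cold start, as the real handler does.
const shell = readFileSync(join(__dirname, '../index.html'), 'utf8')

// Placeholder for the site's GraphQL lookup (getVideo); illustrative only.
async function fetchVideoMeta(vid: string): Promise<{ title: string; desc: string; coverImage: string }> {
  return { title: `Video ${vid}`, desc: '', coverImage: '' }
}

export default async function handler(req: VercelRequest, res: VercelResponse) {
  res.status(200)
  res.setHeader('Content-Type', 'text/html; charset=utf-8')
  const m = /\/video\/(\w+)/.exec(req.url ?? '')
  if (!m) {
    res.end(shell)
    return
  }
  try {
    const meta = await fetchVideoMeta(m[1])
    const og = [
      `<meta property="og:title" content="${encode(meta.title)}">`,
      `<meta property="og:description" content="${encode(meta.desc)}">`,
      `<meta property="og:image" content="${encode(meta.coverImage)}">`,
    ].join('')
    // Splice the tags into <head>; any failure falls back to the untouched shell.
    res.end(shell.replace('<head>', `<head>${og}`))
  } catch {
    res.end(shell)
  }
}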
\n {{ t('user.user-page.avatar.current') }}\n
\n\n {{ t('user.user-page.avatar.requirement') }}\n
\n{{ t('user.user-page.password.title') }}
\n\n {{ t('user.user-page.password.old') }}\n
\n\n {{ t('user.user-page.password.new') }}\n
\n\n {{ t('user.user-page.password.confirmNew') }}\n
\n{{ t('user.user-page.email.title') }}
\n{{ t('user.user-page.email.boundEmail') }}:{{ email ?? t('user.user-page.email.none') }}
\n"+le(r[e].content)+`
+`};W.fence=function(r,e,t,n,o){var s=r[e],a=s.info?VN(s.info).trim():"",c="",i="",l,u,h,f,p;return a&&(h=a.split(/(\s+)/g),c=h[0],i=h.slice(2).join("")),t.highlight?l=t.highlight(s.content,c,i)||le(s.content):l=le(s.content),l.indexOf(""+l+`
+`):""+l+`
+`};W.image=function(r,e,t,n,o){var s=r[e];return s.attrs[s.attrIndex("alt")][1]=o.renderInlineAsText(s.children,t,n),o.renderToken(r,e,t)};W.hardbreak=function(r,e,t){return t.xhtmlOut?`=4||e.src.charCodeAt(m)!==91)return!1;for(;++m H(he/g)&&ce("overflow"),u*=g}const l=e.length+1;s=dn(n-i,l,i==0),H(n/l)>he-o&&ce("overflow"),o+=H(n/l),n%=l,e.splice(n++,0,o)}return String.fromCodePoint(...e)},Ot=function(r){const e=[];r=Lt(r);let t=r.length,n=un,o=0,s=ln;for(const i of r)i<128&&e.push(xt(i));let a=e.length,c=a;for(a&&e.push(pn);c =0))try{e.hostname=vn.toASCII(e.hostname)}catch{}return ie.encode(ie.format(e))}function a$(r){var e=ie.parse(r,!0);if(e.hostname&&(!e.protocol||bn.indexOf(e.protocol)>=0))try{e.hostname=vn.toUnicode(e.hostname)}catch{}return ie.decode(ie.format(e),ie.decode.defaultChars+"%")}function M(r,e){if(!(this instanceof M))return new M(r,e);e||Ce.isString(r)||(e=r||{},r="default"),this.inline=new t$,this.block=new e$,this.core=new Kj,this.renderer=new Xj,this.linkify=new r$,this.validateLink=c$,this.normalizeLink=i$,this.normalizeLinkText=a$,this.utils=Ce,this.helpers=Ce.assign({},Qj),this.options={},this.configure(r),e&&this.set(e)}M.prototype.set=function(r){return Ce.assign(this.options,r),this};M.prototype.configure=function(r){var e=this,t;if(Ce.isString(r)&&(t=r,r=n$[t],!r))throw new Error('Wrong `markdown-it` preset "'+t+'", check name');if(!r)throw new Error("Wrong `markdown-it` preset, can't be empty");return r.options&&e.set(r.options),r.components&&Object.keys(r.components).forEach(function(n){r.components[n].rules&&e[n].ruler.enableOnly(r.components[n].rules),r.components[n].rules2&&e[n].ruler2.enableOnly(r.components[n].rules2)}),this};M.prototype.enable=function(r,e){var t=[];Array.isArray(r)||(r=[r]),["core","block","inline"].forEach(function(o){t=t.concat(this[o].ruler.enable(r,!0))},this),t=t.concat(this.inline.ruler2.enable(r,!0));var n=r.filter(function(o){return t.indexOf(o)<0});if(n.length&&!e)throw new Error("MarkdownIt. Failed to enable unknown rule(s): "+n);return this};M.prototype.disable=function(r,e){var t=[];Array.isArray(r)||(r=[r]),["core","block","inline"].forEach(function(o){t=t.concat(this[o].ruler.disable(r,!0))},this),t=t.concat(this.inline.ruler2.disable(r,!0));var n=r.filter(function(o){return t.indexOf(o)<0});if(n.length&&!e)throw new Error("MarkdownIt. 
Failed to disable unknown rule(s): "+n);return this};M.prototype.use=function(r){var e=[this].concat(Array.prototype.slice.call(arguments,1));return r.apply(r,e),this};M.prototype.parse=function(r,e){if(typeof r!="string")throw new Error("Input data should be a String");var t=new this.core.State(r,this,e);return this.core.process(t),t.tokens};M.prototype.render=function(r,e){return e=e||{},this.renderer.render(this.parse(r,e),this.options,e)};M.prototype.parseInline=function(r,e){var t=new this.core.State(r,this,e);return t.inlineMode=!0,this.core.process(t),t.tokens};M.prototype.renderInline=function(r,e){return e=e||{},this.renderer.render(this.parseInline(r,e),this.options,e)};var l$=M;(function(r){r.exports=l$})(Qr);const DU=On(Qr.exports),u$=["aaa","aarp","abarth","abb","abbott","abbvie","abc","able","abogado","abudhabi","ac","academy","accenture","accountant","accountants","aco","actor","ad","adac","ads","adult","ae","aeg","aero","aetna","af","afl","africa","ag","agakhan","agency","ai","aig","airbus","airforce","airtel","akdn","al","alfaromeo","alibaba","alipay","allfinanz","allstate","ally","alsace","alstom","am","amazon","americanexpress","americanfamily","amex","amfam","amica","amsterdam","analytics","android","anquan","anz","ao","aol","apartments","app","apple","aq","aquarelle","ar","arab","aramco","archi","army","arpa","art","arte","as","asda","asia","associates","at","athleta","attorney","au","auction","audi","audible","audio","auspost","author","auto","autos","avianca","aw","aws","ax","axa","az","azure","ba","baby","baidu","banamex","bananarepublic","band","bank","bar","barcelona","barclaycard","barclays","barefoot","bargains","baseball","basketball","bauhaus","bayern","bb","bbc","bbt","bbva","bcg","bcn","bd","be","beats","beauty","beer","bentley","berlin","best","bestbuy","bet","bf","bg","bh","bharti","bi","bible","bid","bike","bing","bingo","bio","biz","bj","black","blackfriday","blockbuster","blog","bloomberg","blue","bm","bms","bmw","bn","bnpparibas","bo","boats","boehringer","bofa","bom","bond","boo","book","booking","bosch","bostik","boston","bot","boutique","box","br","bradesco","bridgestone","broadway","broker","brother","brussels","bs","bt","build","builders","business","buy","buzz","bv","bw","by","bz","bzh","ca","cab","cafe","cal","call","calvinklein","cam","camera","camp","canon","capetown","capital","capitalone","car","caravan","cards","care","career","careers","cars","casa","case","cash","casino","cat","catering","catholic","cba","cbn","cbre","cbs","cc","cd","center","ceo","cern","cf","cfa","cfd","cg","ch","chanel","channel","charity","chase","chat","cheap","chintai","christmas","chrome","church","ci","cipriani","circle","cisco","citadel","citi","citic","city","cityeats","ck","cl","claims","cleaning","click","clinic","clinique","clothing","cloud","club","clubmed","cm","cn","co","coach","codes","coffee","college","cologne","com","comcast","commbank","community","company","compare","computer","comsec","condos","construction","consulting","contact","contractors","cooking","cookingchannel","cool","coop","corsica","country","coupon","coupons","courses","cpa","cr","credit","creditcard","creditunion","cricket","crown","crs","cruise","cruises","cu","cuisinella","cv","cw","cx","cy","cymru","cyou","cz","dabur","dad","dance","data","date","dating","datsun","day","dclk","dds","de","deal","dealer","deals","degree","delivery","dell","deloitte","delta","democrat","dental","dentist","desi","design","dev","dhl","diamonds","diet","digital","direct","directory","discount","discover",
"dish","diy","dj","dk","dm","dnp","do","docs","doctor","dog","domains","dot","download","drive","dtv","dubai","dunlop","dupont","durban","dvag","dvr","dz","earth","eat","ec","eco","edeka","edu","education","ee","eg","email","emerck","energy","engineer","engineering","enterprises","epson","equipment","er","ericsson","erni","es","esq","estate","et","etisalat","eu","eurovision","eus","events","exchange","expert","exposed","express","extraspace","fage","fail","fairwinds","faith","family","fan","fans","farm","farmers","fashion","fast","fedex","feedback","ferrari","ferrero","fi","fiat","fidelity","fido","film","final","finance","financial","fire","firestone","firmdale","fish","fishing","fit","fitness","fj","fk","flickr","flights","flir","florist","flowers","fly","fm","fo","foo","food","foodnetwork","football","ford","forex","forsale","forum","foundation","fox","fr","free","fresenius","frl","frogans","frontdoor","frontier","ftr","fujitsu","fun","fund","furniture","futbol","fyi","ga","gal","gallery","gallo","gallup","game","games","gap","garden","gay","gb","gbiz","gd","gdn","ge","gea","gent","genting","george","gf","gg","ggee","gh","gi","gift","gifts","gives","giving","gl","glass","gle","global","globo","gm","gmail","gmbh","gmo","gmx","gn","godaddy","gold","goldpoint","golf","goo","goodyear","goog","google","gop","got","gov","gp","gq","gr","grainger","graphics","gratis","green","gripe","grocery","group","gs","gt","gu","guardian","gucci","guge","guide","guitars","guru","gw","gy","hair","hamburg","hangout","haus","hbo","hdfc","hdfcbank","health","healthcare","help","helsinki","here","hermes","hgtv","hiphop","hisamitsu","hitachi","hiv","hk","hkt","hm","hn","hockey","holdings","holiday","homedepot","homegoods","homes","homesense","honda","horse","hospital","host","hosting","hot","hoteles","hotels","hotmail","house","how","hr","hsbc","ht","hu","hughes","hyatt","hyundai","ibm","icbc","ice","icu","id","ie","ieee","ifm","ikano","il","im","imamat","imdb","immo","immobilien","in","inc","industries","infiniti","info","ing","ink","institute","insurance","insure","int","international","intuit","investments","io","ipiranga","iq","ir","irish","is","ismaili","ist","istanbul","it","itau","itv","jaguar","java","jcb","je","jeep","jetzt","jewelry","jio","jll","jm","jmp","jnj","jo","jobs","joburg","jot","joy","jp","jpmorgan","jprs","juegos","juniper","kaufen","kddi","ke","kerryhotels","kerrylogistics","kerryproperties","kfh","kg","kh","ki","kia","kids","kim","kinder","kindle","kitchen","kiwi","km","kn","koeln","komatsu","kosher","kp","kpmg","kpn","kr","krd","kred","kuokgroup","kw","ky","kyoto","kz","la","lacaixa","lamborghini","lamer","lancaster","lancia","land","landrover","lanxess","lasalle","lat","latino","latrobe","law","lawyer","lb","lc","lds","lease","leclerc","lefrak","legal","lego","lexus","lgbt","li","lidl","life","lifeinsurance","lifestyle","lighting","like","lilly","limited","limo","lincoln","linde","link","lipsy","live","living","lk","llc","llp","loan","loans","locker","locus","loft","lol","london","lotte","lotto","love","lpl","lplfinancial","lr","ls","lt","ltd","ltda","lu","lundbeck","luxe","luxury","lv","ly","ma","macys","madrid","maif","maison","makeup","man","management","mango","map","market","marketing","markets","marriott","marshalls","maserati","mattel","mba","mc","mckinsey","md","me","med","media","meet","melbourne","meme","memorial","men","menu","merckmsd","mg","mh","miami","microsoft","mil","mini","mint","mit","mitsubishi","mk","ml","mlb","mls","mm","mma","mn","mo","mobi","mobile","moda","moe","m
oi","mom","monash","money","monster","mormon","mortgage","moscow","moto","motorcycles","mov","movie","mp","mq","mr","ms","msd","mt","mtn","mtr","mu","museum","music","mutual","mv","mw","mx","my","mz","na","nab","nagoya","name","natura","navy","nba","nc","ne","nec","net","netbank","netflix","network","neustar","new","news","next","nextdirect","nexus","nf","nfl","ng","ngo","nhk","ni","nico","nike","nikon","ninja","nissan","nissay","nl","no","nokia","northwesternmutual","norton","now","nowruz","nowtv","np","nr","nra","nrw","ntt","nu","nyc","nz","obi","observer","office","okinawa","olayan","olayangroup","oldnavy","ollo","om","omega","one","ong","onl","online","ooo","open","oracle","orange","org","organic","origins","osaka","otsuka","ott","ovh","pa","page","panasonic","paris","pars","partners","parts","party","passagens","pay","pccw","pe","pet","pf","pfizer","pg","ph","pharmacy","phd","philips","phone","photo","photography","photos","physio","pics","pictet","pictures","pid","pin","ping","pink","pioneer","pizza","pk","pl","place","play","playstation","plumbing","plus","pm","pn","pnc","pohl","poker","politie","porn","post","pr","pramerica","praxi","press","prime","pro","prod","productions","prof","progressive","promo","properties","property","protection","pru","prudential","ps","pt","pub","pw","pwc","py","qa","qpon","quebec","quest","racing","radio","re","read","realestate","realtor","realty","recipes","red","redstone","redumbrella","rehab","reise","reisen","reit","reliance","ren","rent","rentals","repair","report","republican","rest","restaurant","review","reviews","rexroth","rich","richardli","ricoh","ril","rio","rip","ro","rocher","rocks","rodeo","rogers","room","rs","rsvp","ru","rugby","ruhr","run","rw","rwe","ryukyu","sa","saarland","safe","safety","sakura","sale","salon","samsclub","samsung","sandvik","sandvikcoromant","sanofi","sap","sarl","sas","save","saxo","sb","sbi","sbs","sc","sca","scb","schaeffler","schmidt","scholarships","school","schule","schwarz","science","scot","sd","se","search","seat","secure","security","seek","select","sener","services","ses","seven","sew","sex","sexy","sfr","sg","sh","shangrila","sharp","shaw","shell","shia","shiksha","shoes","shop","shopping","shouji","show","showtime","si","silk","sina","singles","site","sj","sk","ski","skin","sky","skype","sl","sling","sm","smart","smile","sn","sncf","so","soccer","social","softbank","software","sohu","solar","solutions","song","sony","soy","spa","space","sport","spot","sr","srl","ss","st","stada","staples","star","statebank","statefarm","stc","stcgroup","stockholm","storage","store","stream","studio","study","style","su","sucks","supplies","supply","support","surf","surgery","suzuki","sv","swatch","swiss","sx","sy","sydney","systems","sz","tab","taipei","talk","taobao","target","tatamotors","tatar","tattoo","tax","taxi","tc","tci","td","tdk","team","tech","technology","tel","temasek","tennis","teva","tf","tg","th","thd","theater","theatre","tiaa","tickets","tienda","tiffany","tips","tires","tirol","tj","tjmaxx","tjx","tk","tkmaxx","tl","tm","tmall","tn","to","today","tokyo","tools","top","toray","toshiba","total","tours","town","toyota","toys","tr","trade","trading","training","travel","travelchannel","travelers","travelersinsurance","trust","trv","tt","tube","tui","tunes","tushu","tv","tvs","tw","tz","ua","ubank","ubs","ug","uk","unicom","university","uno","uol","ups","us","uy","uz","va","vacations","vana","vanguard","vc","ve","vegas","ventures","verisign","vermögensberater","vermögensberatung","versicherung","vet","v
g","vi","viajes","video","vig","viking","villas","vin","vip","virgin","visa","vision","viva","vivo","vlaanderen","vn","vodka","volkswagen","volvo","vote","voting","voto","voyage","vu","vuelos","wales","walmart","walter","wang","wanggou","watch","watches","weather","weatherchannel","webcam","weber","website","wed","wedding","weibo","weir","wf","whoswho","wien","wiki","williamhill","win","windows","wine","winners","wme","wolterskluwer","woodside","work","works","world","wow","ws","wtc","wtf","xbox","xerox","xfinity","xihuan","xin","xxx","xyz","yachts","yahoo","yamaxun","yandex","ye","yodobashi","yoga","yokohama","you","youtube","yt","yun","za","zappos","zara","zero","zip","zm","zone","zuerich","zw","ελ","ευ","бг","бел","дети","ею","католик","ком","мкд","мон","москва","онлайн","орг","рус","рф","сайт","срб","укр","қаз","հայ","ישראל","קום","ابوظبي","اتصالات","ارامكو","الاردن","البحرين","الجزائر","السعودية","العليان","المغرب","امارات","ایران","بارت","بازار","بيتك","بھارت","تونس","سودان","سورية","شبكة","عراق","عرب","عمان","فلسطين","قطر","كاثوليك","كوم","مصر","مليسيا","موريتانيا","موقع","همراه","پاکستان","ڀارت","कॉम","नेट","भारत","भारतम्","भारोत","संगठन","বাংলা","ভারত","ভাৰত","ਭਾਰਤ","ભારત","ଭାରତ","இந்தியா","இலங்கை","சிங்கப்பூர்","భారత్","ಭಾರತ","ഭാരതം","ලංකා","คอม","ไทย","ລາວ","გე","みんな","アマゾン","クラウド","グーグル","コム","ストア","セール","ファッション","ポイント","世界","中信","中国","中國","中文网","亚马逊","企业","佛山","信息","健康","八卦","公司","公益","台湾","台灣","商城","商店","商标","嘉里","嘉里大酒店","在线","大拿","天主教","娱乐","家電","广东","微博","慈善","我爱你","手机","招聘","政务","政府","新加坡","新闻","时尚","書籍","机构","淡马锡","游戏","澳門","点看","移动","组织机构","网址","网店","网站","网络","联通","诺基亚","谷歌","购物","通販","集团","電訊盈科","飞利浦","食品","餐厅","香格里拉","香港","닷넷","닷컴","삼성","한국"];function qU(r){function e(n,o){if(n.test(o)){const s=o.match(n);return s?s[0].length:0}return 0}function t(n){return(o,s)=>{const a=o.slice(s);return e(n,a)}}r.linkify.set({fuzzyLink:!0,fuzzyEmail:!1,fuzzyIP:!1}).tlds(u$).tlds("onion",!0).add("ac",{validate:t(/\d+/),normalize(n){n.url=`https://www.acfun.cn/v/${n.url}`}}).add("av",{validate:t(/\d+/),normalize(n){n.url=`https://www.bilibili.com/video/${n.url}`}}).add("bv",{validate:t(/[a-zA-Z0-9]+/),normalize(n){n.url=`https://www.bilibili.com/video/${n.url}`}}).add("sm",{validate:t(/\d+/),normalize(n){n.url=`https://www.nicovideo.jp/watch/${n.url}`}}).add("youtube-",{validate:t(/[-\w]+/),normalize(n){n.url=`https://www.youtube.com/watch?v=${n.url.replace(/^youtube-/,"")}`}}).add("mylist/",{validate:t(/\d+/),normalize(n){n.url=`https://www.nicovideo.jp/${n.url}`}})}const jr={};function SU(r){r.inline.ruler.before("link","face",(e,t)=>{if(t||e.posMax-e.pos<10||!e.src.slice(e.pos).startsWith("[[face:"))return!1;const n=e.pos,o=e.posMax;e.pos++;let s;for(;e.pos ` tags.\n **/\nMarkdownIt.prototype.renderInline = function (src, env) {\n env = env || {};\n\n return this.renderer.render(this.parseInline(src, env), this.options, env);\n};\n\n\nmodule.exports = MarkdownIt;\n","'use strict';\n\n\nmodule.exports = require('./lib/');\n","import type MarkdownIt from 'markdown-it'\nimport tlds from 'tlds'\n\nexport function linkifyAdditionPlugin(markdownIt: MarkdownIt) {\n function match(reg: RegExp, text: string): number {\n if (reg.test(text)) {\n const m = text.match(reg)\n return m ? 
m[0].length : 0\n }\n return 0\n }\n\n function val(reg: RegExp) {\n return (text: string, pos: number) => {\n const tail = text.slice(pos)\n return match(reg, tail)\n }\n }\n\n markdownIt.linkify\n .set({\n fuzzyLink: true,\n fuzzyEmail: false,\n fuzzyIP: false,\n })\n .tlds(tlds) // full tld list\n .tlds('onion', true)\n .add('ac', {\n validate: val(/\\d+/),\n normalize(match) {\n match.url = `https://www.acfun.cn/v/${match.url}`\n },\n })\n .add('av', {\n validate: val(/\\d+/),\n normalize(match) {\n match.url = `https://www.bilibili.com/video/${match.url}`\n },\n })\n .add('bv', {\n validate: val(/[a-zA-Z0-9]+/),\n normalize(match) {\n match.url = `https://www.bilibili.com/video/${match.url}`\n },\n })\n .add('sm', {\n validate: val(/\\d+/),\n normalize(match) {\n match.url = `https://www.nicovideo.jp/watch/${match.url}`\n },\n })\n .add('youtube-', {\n validate: val(/[-\\w]+/),\n normalize(match) {\n match.url = `https://www.youtube.com/watch?v=${match.url.replace(/^youtube-/, '')}`\n },\n })\n .add('mylist/', {\n validate: val(/\\d+/),\n normalize(match) {\n match.url = `https://www.nicovideo.jp/${match.url}`\n },\n })\n}\n\n// { name: url }\nconst faces: Record' +\n escapeHtml(tokens[idx].content) +\n '
';\n};\n\n\ndefault_rules.code_block = function (tokens, idx, options, env, slf) {\n var token = tokens[idx];\n\n return '
\\n';\n};\n\n\ndefault_rules.fence = function (tokens, idx, options, env, slf) {\n var token = tokens[idx],\n info = token.info ? unescapeAll(token.info).trim() : '',\n langName = '',\n langAttrs = '',\n highlighted, i, arr, tmpAttrs, tmpToken;\n\n if (info) {\n arr = info.split(/(\\s+)/g);\n langName = arr[0];\n langAttrs = arr.slice(2).join('');\n }\n\n if (options.highlight) {\n highlighted = options.highlight(token.content, langName, langAttrs) || escapeHtml(token.content);\n } else {\n highlighted = escapeHtml(token.content);\n }\n\n if (highlighted.indexOf('' +\n escapeHtml(tokens[idx].content) +\n '
\\n';\n }\n\n\n return ''\n + highlighted\n + '
\\n';\n};\n\n\ndefault_rules.image = function (tokens, idx, options, env, slf) {\n var token = tokens[idx];\n\n // \"alt\" attr MUST be set, even if empty. Because it's mandatory and\n // should be placed on proper position for tests.\n //\n // Replace content with actual value\n\n token.attrs[token.attrIndex('alt')][1] =\n slf.renderInlineAsText(token.children, options, env);\n\n return slf.renderToken(tokens, idx, options);\n};\n\n\ndefault_rules.hardbreak = function (tokens, idx, options /*, env */) {\n return options.xhtmlOut ? ''\n + highlighted\n + '
\\n' : '
\\n';\n};\ndefault_rules.softbreak = function (tokens, idx, options /*, env */) {\n return options.breaks ? (options.xhtmlOut ? '
\\n' : '
\\n') : '\\n';\n};\n\n\ndefault_rules.text = function (tokens, idx /*, options, env */) {\n return escapeHtml(tokens[idx].content);\n};\n\n\ndefault_rules.html_block = function (tokens, idx /*, options, env */) {\n return tokens[idx].content;\n};\ndefault_rules.html_inline = function (tokens, idx /*, options, env */) {\n return tokens[idx].content;\n};\n\n\n/**\n * new Renderer()\n *\n * Creates new [[Renderer]] instance and fill [[Renderer#rules]] with defaults.\n **/\nfunction Renderer() {\n\n /**\n * Renderer#rules -> Object\n *\n * Contains render rules for tokens. Can be updated and extended.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * md.renderer.rules.strong_open = function () { return ''; };\n * md.renderer.rules.strong_close = function () { return ''; };\n *\n * var result = md.renderInline(...);\n * ```\n *\n * Each rule is called as independent static function with fixed signature:\n *\n * ```javascript\n * function my_token_render(tokens, idx, options, env, renderer) {\n * // ...\n * return renderedHTML;\n * }\n * ```\n *\n * See [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js)\n * for more details and examples.\n **/\n this.rules = assign({}, default_rules);\n}\n\n\n/**\n * Renderer.renderAttrs(token) -> String\n *\n * Render token attributes to string.\n **/\nRenderer.prototype.renderAttrs = function renderAttrs(token) {\n var i, l, result;\n\n if (!token.attrs) { return ''; }\n\n result = '';\n\n for (i = 0, l = token.attrs.length; i < l; i++) {\n result += ' ' + escapeHtml(token.attrs[i][0]) + '=\"' + escapeHtml(token.attrs[i][1]) + '\"';\n }\n\n return result;\n};\n\n\n/**\n * Renderer.renderToken(tokens, idx, options) -> String\n * - tokens (Array): list of tokens\n * - idx (Numbed): token index to render\n * - options (Object): params of parser instance\n *\n * Default token renderer. Can be overriden by custom function\n * in [[Renderer#rules]].\n **/\nRenderer.prototype.renderToken = function renderToken(tokens, idx, options) {\n var nextToken,\n result = '',\n needLf = false,\n token = tokens[idx];\n\n // Tight list paragraphs\n if (token.hidden) {\n return '';\n }\n\n // Insert a newline between hidden paragraph and subsequent opening\n // block-level tag.\n //\n // For example, here we should insert a newline before blockquote:\n // - a\n // >\n //\n if (token.block && token.nesting !== -1 && idx && tokens[idx - 1].hidden) {\n result += '\\n';\n }\n\n // Add token name, e.g. ``.\n //\n needLf = false;\n }\n }\n }\n }\n\n result += needLf ? 
'>\\n' : '>';\n\n return result;\n};\n\n\n/**\n * Renderer.renderInline(tokens, options, env) -> String\n * - tokens (Array): list on block tokens to render\n * - options (Object): params of parser instance\n * - env (Object): additional data from parsed input (references, for example)\n *\n * The same as [[Renderer.render]], but for single token of `inline` type.\n **/\nRenderer.prototype.renderInline = function (tokens, options, env) {\n var type,\n result = '',\n rules = this.rules;\n\n for (var i = 0, len = tokens.length; i < len; i++) {\n type = tokens[i].type;\n\n if (typeof rules[type] !== 'undefined') {\n result += rules[type](tokens, i, options, env, this);\n } else {\n result += this.renderToken(tokens, i, options);\n }\n }\n\n return result;\n};\n\n\n/** internal\n * Renderer.renderInlineAsText(tokens, options, env) -> String\n * - tokens (Array): list on block tokens to render\n * - options (Object): params of parser instance\n * - env (Object): additional data from parsed input (references, for example)\n *\n * Special kludge for image `alt` attributes to conform CommonMark spec.\n * Don't try to use it! Spec requires to show `alt` content with stripped markup,\n * instead of simple escaping.\n **/\nRenderer.prototype.renderInlineAsText = function (tokens, options, env) {\n var result = '';\n\n for (var i = 0, len = tokens.length; i < len; i++) {\n if (tokens[i].type === 'text') {\n result += tokens[i].content;\n } else if (tokens[i].type === 'image') {\n result += this.renderInlineAsText(tokens[i].children, options, env);\n } else if (tokens[i].type === 'softbreak') {\n result += '\\n';\n }\n }\n\n return result;\n};\n\n\n/**\n * Renderer.render(tokens, options, env) -> String\n * - tokens (Array): list on block tokens to render\n * - options (Object): params of parser instance\n * - env (Object): additional data from parsed input (references, for example)\n *\n * Takes token stream and generates HTML. Probably, you will never need to call\n * this method directly.\n **/\nRenderer.prototype.render = function (tokens, options, env) {\n var i, len, type,\n result = '',\n rules = this.rules;\n\n for (i = 0, len = tokens.length; i < len; i++) {\n type = tokens[i].type;\n\n if (type === 'inline') {\n result += this.renderInline(tokens[i].children, options, env);\n } else if (typeof rules[type] !== 'undefined') {\n result += rules[tokens[i].type](tokens, i, options, env, this);\n } else {\n result += this.renderToken(tokens, i, options, env);\n }\n }\n\n return result;\n};\n\nmodule.exports = Renderer;\n","/**\n * class Ruler\n *\n * Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and\n * [[MarkdownIt#inline]] to manage sequences of functions (rules):\n *\n * - keep rules in defined order\n * - assign the name to each rule\n * - enable/disable rules\n * - add/replace rules\n * - allow assign rules to additional named chains (in the same)\n * - cacheing lists of active rules\n *\n * You will not need use this class directly until write plugins. For simple\n * rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and\n * [[MarkdownIt.use]].\n **/\n'use strict';\n\n\n/**\n * new Ruler()\n **/\nfunction Ruler() {\n // List of added rules. 
Each element is:\n //\n // {\n // name: XXX,\n // enabled: Boolean,\n // fn: Function(),\n // alt: [ name2, name3 ]\n // }\n //\n this.__rules__ = [];\n\n // Cached rule chains.\n //\n // First level - chain name, '' for default.\n // Second level - diginal anchor for fast filtering by charcodes.\n //\n this.__cache__ = null;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Helper methods, should not be used directly\n\n\n// Find rule index by name\n//\nRuler.prototype.__find__ = function (name) {\n for (var i = 0; i < this.__rules__.length; i++) {\n if (this.__rules__[i].name === name) {\n return i;\n }\n }\n return -1;\n};\n\n\n// Build rules lookup cache\n//\nRuler.prototype.__compile__ = function () {\n var self = this;\n var chains = [ '' ];\n\n // collect unique names\n self.__rules__.forEach(function (rule) {\n if (!rule.enabled) { return; }\n\n rule.alt.forEach(function (altName) {\n if (chains.indexOf(altName) < 0) {\n chains.push(altName);\n }\n });\n });\n\n self.__cache__ = {};\n\n chains.forEach(function (chain) {\n self.__cache__[chain] = [];\n self.__rules__.forEach(function (rule) {\n if (!rule.enabled) { return; }\n\n if (chain && rule.alt.indexOf(chain) < 0) { return; }\n\n self.__cache__[chain].push(rule.fn);\n });\n });\n};\n\n\n/**\n * Ruler.at(name, fn [, options])\n * - name (String): rule name to replace.\n * - fn (Function): new rule function.\n * - options (Object): new rule options (not mandatory).\n *\n * Replace rule by name with new function & options. Throws error if name not\n * found.\n *\n * ##### Options:\n *\n * - __alt__ - array with names of \"alternate\" chains.\n *\n * ##### Example\n *\n * Replace existing typographer replacement rule with new one:\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * md.core.ruler.at('replacements', function replace(state) {\n * //...\n * });\n * ```\n **/\nRuler.prototype.at = function (name, fn, options) {\n var index = this.__find__(name);\n var opt = options || {};\n\n if (index === -1) { throw new Error('Parser rule not found: ' + name); }\n\n this.__rules__[index].fn = fn;\n this.__rules__[index].alt = opt.alt || [];\n this.__cache__ = null;\n};\n\n\n/**\n * Ruler.before(beforeName, ruleName, fn [, options])\n * - beforeName (String): new rule will be added before this one.\n * - ruleName (String): name of added rule.\n * - fn (Function): rule function.\n * - options (Object): rule options (not mandatory).\n *\n * Add new rule to chain before one with given name. See also\n * [[Ruler.after]], [[Ruler.push]].\n *\n * ##### Options:\n *\n * - __alt__ - array with names of \"alternate\" chains.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * md.block.ruler.before('paragraph', 'my_rule', function replace(state) {\n * //...\n * });\n * ```\n **/\nRuler.prototype.before = function (beforeName, ruleName, fn, options) {\n var index = this.__find__(beforeName);\n var opt = options || {};\n\n if (index === -1) { throw new Error('Parser rule not found: ' + beforeName); }\n\n this.__rules__.splice(index, 0, {\n name: ruleName,\n enabled: true,\n fn: fn,\n alt: opt.alt || []\n });\n\n this.__cache__ = null;\n};\n\n\n/**\n * Ruler.after(afterName, ruleName, fn [, options])\n * - afterName (String): new rule will be added after this one.\n * - ruleName (String): name of added rule.\n * - fn (Function): rule function.\n * - options (Object): rule options (not mandatory).\n *\n * Add new rule to chain after one with given name. 
See also\n * [[Ruler.before]], [[Ruler.push]].\n *\n * ##### Options:\n *\n * - __alt__ - array with names of \"alternate\" chains.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * md.inline.ruler.after('text', 'my_rule', function replace(state) {\n * //...\n * });\n * ```\n **/\nRuler.prototype.after = function (afterName, ruleName, fn, options) {\n var index = this.__find__(afterName);\n var opt = options || {};\n\n if (index === -1) { throw new Error('Parser rule not found: ' + afterName); }\n\n this.__rules__.splice(index + 1, 0, {\n name: ruleName,\n enabled: true,\n fn: fn,\n alt: opt.alt || []\n });\n\n this.__cache__ = null;\n};\n\n/**\n * Ruler.push(ruleName, fn [, options])\n * - ruleName (String): name of added rule.\n * - fn (Function): rule function.\n * - options (Object): rule options (not mandatory).\n *\n * Push new rule to the end of chain. See also\n * [[Ruler.before]], [[Ruler.after]].\n *\n * ##### Options:\n *\n * - __alt__ - array with names of \"alternate\" chains.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * md.core.ruler.push('my_rule', function replace(state) {\n * //...\n * });\n * ```\n **/\nRuler.prototype.push = function (ruleName, fn, options) {\n var opt = options || {};\n\n this.__rules__.push({\n name: ruleName,\n enabled: true,\n fn: fn,\n alt: opt.alt || []\n });\n\n this.__cache__ = null;\n};\n\n\n/**\n * Ruler.enable(list [, ignoreInvalid]) -> Array\n * - list (String|Array): list of rule names to enable.\n * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.\n *\n * Enable rules with given names. If any rule name not found - throw Error.\n * Errors can be disabled by second param.\n *\n * Returns list of found rule names (if no exception happened).\n *\n * See also [[Ruler.disable]], [[Ruler.enableOnly]].\n **/\nRuler.prototype.enable = function (list, ignoreInvalid) {\n if (!Array.isArray(list)) { list = [ list ]; }\n\n var result = [];\n\n // Search by name and enable\n list.forEach(function (name) {\n var idx = this.__find__(name);\n\n if (idx < 0) {\n if (ignoreInvalid) { return; }\n throw new Error('Rules manager: invalid rule name ' + name);\n }\n this.__rules__[idx].enabled = true;\n result.push(name);\n }, this);\n\n this.__cache__ = null;\n return result;\n};\n\n\n/**\n * Ruler.enableOnly(list [, ignoreInvalid])\n * - list (String|Array): list of rule names to enable (whitelist).\n * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.\n *\n * Enable rules with given names, and disable everything else. If any rule name\n * not found - throw Error. Errors can be disabled by second param.\n *\n * See also [[Ruler.disable]], [[Ruler.enable]].\n **/\nRuler.prototype.enableOnly = function (list, ignoreInvalid) {\n if (!Array.isArray(list)) { list = [ list ]; }\n\n this.__rules__.forEach(function (rule) { rule.enabled = false; });\n\n this.enable(list, ignoreInvalid);\n};\n\n\n/**\n * Ruler.disable(list [, ignoreInvalid]) -> Array\n * - list (String|Array): list of rule names to disable.\n * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.\n *\n * Disable rules with given names. 
If any rule name not found - throw Error.\n * Errors can be disabled by second param.\n *\n * Returns list of found rule names (if no exception happened).\n *\n * See also [[Ruler.enable]], [[Ruler.enableOnly]].\n **/\nRuler.prototype.disable = function (list, ignoreInvalid) {\n if (!Array.isArray(list)) { list = [ list ]; }\n\n var result = [];\n\n // Search by name and disable\n list.forEach(function (name) {\n var idx = this.__find__(name);\n\n if (idx < 0) {\n if (ignoreInvalid) { return; }\n throw new Error('Rules manager: invalid rule name ' + name);\n }\n this.__rules__[idx].enabled = false;\n result.push(name);\n }, this);\n\n this.__cache__ = null;\n return result;\n};\n\n\n/**\n * Ruler.getRules(chainName) -> Array\n *\n * Return array of active functions (rules) for given chain name. It analyzes\n * rules configuration, compiles caches if not exists and returns result.\n *\n * Default chain name is `''` (empty string). It can't be skipped. That's\n * done intentionally, to keep signature monomorphic for high speed.\n **/\nRuler.prototype.getRules = function (chainName) {\n if (this.__cache__ === null) {\n this.__compile__();\n }\n\n // Chain can be empty, if rules disabled. But we still have to return Array.\n return this.__cache__[chainName] || [];\n};\n\nmodule.exports = Ruler;\n","// Normalize input string\n\n'use strict';\n\n\n// https://spec.commonmark.org/0.29/#line-ending\nvar NEWLINES_RE = /\\r\\n?|\\n/g;\nvar NULL_RE = /\\0/g;\n\n\nmodule.exports = function normalize(state) {\n var str;\n\n // Normalize newlines\n str = state.src.replace(NEWLINES_RE, '\\n');\n\n // Replace NULL characters\n str = str.replace(NULL_RE, '\\uFFFD');\n\n state.src = str;\n};\n","'use strict';\n\n\nmodule.exports = function block(state) {\n var token;\n\n if (state.inlineMode) {\n token = new state.Token('inline', '', 0);\n token.content = state.src;\n token.map = [ 0, 1 ];\n token.children = [];\n state.tokens.push(token);\n } else {\n state.md.block.parse(state.src, state.md, state.env, state.tokens);\n }\n};\n","'use strict';\n\nmodule.exports = function inline(state) {\n var tokens = state.tokens, tok, i, l;\n\n // Parse inlines\n for (i = 0, l = tokens.length; i < l; i++) {\n tok = tokens[i];\n if (tok.type === 'inline') {\n state.md.inline.parse(tok.content, state.md, state.env, tok.children);\n }\n }\n};\n","// Replace link-like texts with link nodes.\n//\n// Currently restricted by `md.validateLink()` to http/https/ftp\n//\n'use strict';\n\n\nvar arrayReplaceAt = require('../common/utils').arrayReplaceAt;\n\n\nfunction isLinkOpen(str) {\n return /^\\s]/i.test(str);\n}\nfunction isLinkClose(str) {\n return /^<\\/a\\s*>/i.test(str);\n}\n\n\nmodule.exports = function linkify(state) {\n var i, j, l, tokens, token, currentToken, nodes, ln, text, pos, lastPos,\n level, htmlLinkLevel, url, fullUrl, urlText,\n blockTokens = state.tokens,\n links;\n\n if (!state.md.options.linkify) { return; }\n\n for (j = 0, l = blockTokens.length; j < l; j++) {\n if (blockTokens[j].type !== 'inline' ||\n !state.md.linkify.pretest(blockTokens[j].content)) {\n continue;\n }\n\n tokens = blockTokens[j].children;\n\n htmlLinkLevel = 0;\n\n // We scan from the end, to keep position when new tags added.\n // Use reversed logic in links start/end match\n for (i = tokens.length - 1; i >= 0; i--) {\n currentToken = tokens[i];\n\n // Skip content of markdown links\n if (currentToken.type === 'link_close') {\n i--;\n while (tokens[i].level !== currentToken.level && tokens[i].type !== 'link_open') {\n i--;\n }\n 
continue;\n }\n\n // Skip content of html tag links\n if (currentToken.type === 'html_inline') {\n if (isLinkOpen(currentToken.content) && htmlLinkLevel > 0) {\n htmlLinkLevel--;\n }\n if (isLinkClose(currentToken.content)) {\n htmlLinkLevel++;\n }\n }\n if (htmlLinkLevel > 0) { continue; }\n\n if (currentToken.type === 'text' && state.md.linkify.test(currentToken.content)) {\n\n text = currentToken.content;\n links = state.md.linkify.match(text);\n\n // Now split string to nodes\n nodes = [];\n level = currentToken.level;\n lastPos = 0;\n\n // forbid escape sequence at the start of the string,\n // this avoids http\\://example.com/ from being linkified as\n // http://example.com/\n if (links.length > 0 &&\n links[0].index === 0 &&\n i > 0 &&\n tokens[i - 1].type === 'text_special') {\n links = links.slice(1);\n }\n\n for (ln = 0; ln < links.length; ln++) {\n url = links[ln].url;\n fullUrl = state.md.normalizeLink(url);\n if (!state.md.validateLink(fullUrl)) { continue; }\n\n urlText = links[ln].text;\n\n // Linkifier might send raw hostnames like \"example.com\", where url\n // starts with domain name. So we prepend http:// in those cases,\n // and remove it afterwards.\n //\n if (!links[ln].schema) {\n urlText = state.md.normalizeLinkText('http://' + urlText).replace(/^http:\\/\\//, '');\n } else if (links[ln].schema === 'mailto:' && !/^mailto:/i.test(urlText)) {\n urlText = state.md.normalizeLinkText('mailto:' + urlText).replace(/^mailto:/, '');\n } else {\n urlText = state.md.normalizeLinkText(urlText);\n }\n\n pos = links[ln].index;\n\n if (pos > lastPos) {\n token = new state.Token('text', '', 0);\n token.content = text.slice(lastPos, pos);\n token.level = level;\n nodes.push(token);\n }\n\n token = new state.Token('link_open', 'a', 1);\n token.attrs = [ [ 'href', fullUrl ] ];\n token.level = level++;\n token.markup = 'linkify';\n token.info = 'auto';\n nodes.push(token);\n\n token = new state.Token('text', '', 0);\n token.content = urlText;\n token.level = level;\n nodes.push(token);\n\n token = new state.Token('link_close', 'a', -1);\n token.level = --level;\n token.markup = 'linkify';\n token.info = 'auto';\n nodes.push(token);\n\n lastPos = links[ln].lastIndex;\n }\n if (lastPos < text.length) {\n token = new state.Token('text', '', 0);\n token.content = text.slice(lastPos);\n token.level = level;\n nodes.push(token);\n }\n\n // replace current node\n blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);\n }\n }\n }\n};\n","// Simple typographic replacements\n//\n// (c) (C) → ©\n// (tm) (TM) → ™\n// (r) (R) → ®\n// +- → ±\n// (p) (P) -> §\n// ... → … (also ?.... → ?.., !.... → !..)\n// ???????? → ???, !!!!! 
→ !!!, `,,` → `,`\n// -- → –, --- → —\n//\n'use strict';\n\n// TODO:\n// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾\n// - multiplications 2 x 4 -> 2 × 4\n\nvar RARE_RE = /\\+-|\\.\\.|\\?\\?\\?\\?|!!!!|,,|--/;\n\n// Workaround for phantomjs - need regex without /g flag,\n// or root check will fail every second time\nvar SCOPED_ABBR_TEST_RE = /\\((c|tm|r)\\)/i;\n\nvar SCOPED_ABBR_RE = /\\((c|tm|r)\\)/ig;\nvar SCOPED_ABBR = {\n c: '©',\n r: '®',\n tm: '™'\n};\n\nfunction replaceFn(match, name) {\n return SCOPED_ABBR[name.toLowerCase()];\n}\n\nfunction replace_scoped(inlineTokens) {\n var i, token, inside_autolink = 0;\n\n for (i = inlineTokens.length - 1; i >= 0; i--) {\n token = inlineTokens[i];\n\n if (token.type === 'text' && !inside_autolink) {\n token.content = token.content.replace(SCOPED_ABBR_RE, replaceFn);\n }\n\n if (token.type === 'link_open' && token.info === 'auto') {\n inside_autolink--;\n }\n\n if (token.type === 'link_close' && token.info === 'auto') {\n inside_autolink++;\n }\n }\n}\n\nfunction replace_rare(inlineTokens) {\n var i, token, inside_autolink = 0;\n\n for (i = inlineTokens.length - 1; i >= 0; i--) {\n token = inlineTokens[i];\n\n if (token.type === 'text' && !inside_autolink) {\n if (RARE_RE.test(token.content)) {\n token.content = token.content\n .replace(/\\+-/g, '±')\n // .., ..., ....... -> …\n // but ?..... & !..... -> ?.. & !..\n .replace(/\\.{2,}/g, '…').replace(/([?!])…/g, '$1..')\n .replace(/([?!]){4,}/g, '$1$1$1').replace(/,{2,}/g, ',')\n // em-dash\n .replace(/(^|[^-])---(?=[^-]|$)/mg, '$1\\u2014')\n // en-dash\n .replace(/(^|\\s)--(?=\\s|$)/mg, '$1\\u2013')\n .replace(/(^|[^-\\s])--(?=[^-\\s]|$)/mg, '$1\\u2013');\n }\n }\n\n if (token.type === 'link_open' && token.info === 'auto') {\n inside_autolink--;\n }\n\n if (token.type === 'link_close' && token.info === 'auto') {\n inside_autolink++;\n }\n }\n}\n\n\nmodule.exports = function replace(state) {\n var blkIdx;\n\n if (!state.md.options.typographer) { return; }\n\n for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {\n\n if (state.tokens[blkIdx].type !== 'inline') { continue; }\n\n if (SCOPED_ABBR_TEST_RE.test(state.tokens[blkIdx].content)) {\n replace_scoped(state.tokens[blkIdx].children);\n }\n\n if (RARE_RE.test(state.tokens[blkIdx].content)) {\n replace_rare(state.tokens[blkIdx].children);\n }\n\n }\n};\n","// Convert straight quotation marks to typographic ones\n//\n'use strict';\n\n\nvar isWhiteSpace = require('../common/utils').isWhiteSpace;\nvar isPunctChar = require('../common/utils').isPunctChar;\nvar isMdAsciiPunct = require('../common/utils').isMdAsciiPunct;\n\nvar QUOTE_TEST_RE = /['\"]/;\nvar QUOTE_RE = /['\"]/g;\nvar APOSTROPHE = '\\u2019'; /* ’ */\n\n\nfunction replaceAt(str, index, ch) {\n return str.slice(0, index) + ch + str.slice(index + 1);\n}\n\nfunction process_inlines(tokens, state) {\n var i, token, text, t, pos, max, thisLevel, item, lastChar, nextChar,\n isLastPunctChar, isNextPunctChar, isLastWhiteSpace, isNextWhiteSpace,\n canOpen, canClose, j, isSingle, stack, openQuote, closeQuote;\n\n stack = [];\n\n for (i = 0; i < tokens.length; i++) {\n token = tokens[i];\n\n thisLevel = tokens[i].level;\n\n for (j = stack.length - 1; j >= 0; j--) {\n if (stack[j].level <= thisLevel) { break; }\n }\n stack.length = j + 1;\n\n if (token.type !== 'text') { continue; }\n\n text = token.content;\n pos = 0;\n max = text.length;\n\n /*eslint no-labels:0,block-scoped-var:0*/\n OUTER:\n while (pos < max) {\n QUOTE_RE.lastIndex = pos;\n t = QUOTE_RE.exec(text);\n if (!t) { break; }\n\n 
canOpen = canClose = true;\n pos = t.index + 1;\n isSingle = (t[0] === \"'\");\n\n // Find previous character,\n // default to space if it's the beginning of the line\n //\n lastChar = 0x20;\n\n if (t.index - 1 >= 0) {\n lastChar = text.charCodeAt(t.index - 1);\n } else {\n for (j = i - 1; j >= 0; j--) {\n if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // lastChar defaults to 0x20\n if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'\n\n lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1);\n break;\n }\n }\n\n // Find next character,\n // default to space if it's the end of the line\n //\n nextChar = 0x20;\n\n if (pos < max) {\n nextChar = text.charCodeAt(pos);\n } else {\n for (j = i + 1; j < tokens.length; j++) {\n if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // nextChar defaults to 0x20\n if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'\n\n nextChar = tokens[j].content.charCodeAt(0);\n break;\n }\n }\n\n isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));\n isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));\n\n isLastWhiteSpace = isWhiteSpace(lastChar);\n isNextWhiteSpace = isWhiteSpace(nextChar);\n\n if (isNextWhiteSpace) {\n canOpen = false;\n } else if (isNextPunctChar) {\n if (!(isLastWhiteSpace || isLastPunctChar)) {\n canOpen = false;\n }\n }\n\n if (isLastWhiteSpace) {\n canClose = false;\n } else if (isLastPunctChar) {\n if (!(isNextWhiteSpace || isNextPunctChar)) {\n canClose = false;\n }\n }\n\n if (nextChar === 0x22 /* \" */ && t[0] === '\"') {\n if (lastChar >= 0x30 /* 0 */ && lastChar <= 0x39 /* 9 */) {\n // special case: 1\"\" - count first quote as an inch\n canClose = canOpen = false;\n }\n }\n\n if (canOpen && canClose) {\n // Replace quotes in the middle of punctuation sequence, but not\n // in the middle of the words, i.e.:\n //\n // 1. foo \" bar \" baz - not replaced\n // 2. foo-\"-bar-\"-baz - replaced\n // 3. 
foo\"bar\"baz - not replaced\n //\n canOpen = isLastPunctChar;\n canClose = isNextPunctChar;\n }\n\n if (!canOpen && !canClose) {\n // middle of word\n if (isSingle) {\n token.content = replaceAt(token.content, t.index, APOSTROPHE);\n }\n continue;\n }\n\n if (canClose) {\n // this could be a closing quote, rewind the stack to get a match\n for (j = stack.length - 1; j >= 0; j--) {\n item = stack[j];\n if (stack[j].level < thisLevel) { break; }\n if (item.single === isSingle && stack[j].level === thisLevel) {\n item = stack[j];\n\n if (isSingle) {\n openQuote = state.md.options.quotes[2];\n closeQuote = state.md.options.quotes[3];\n } else {\n openQuote = state.md.options.quotes[0];\n closeQuote = state.md.options.quotes[1];\n }\n\n // replace token.content *before* tokens[item.token].content,\n // because, if they are pointing at the same token, replaceAt\n // could mess up indices when quote length != 1\n token.content = replaceAt(token.content, t.index, closeQuote);\n tokens[item.token].content = replaceAt(\n tokens[item.token].content, item.pos, openQuote);\n\n pos += closeQuote.length - 1;\n if (item.token === i) { pos += openQuote.length - 1; }\n\n text = token.content;\n max = text.length;\n\n stack.length = j;\n continue OUTER;\n }\n }\n }\n\n if (canOpen) {\n stack.push({\n token: i,\n pos: t.index,\n single: isSingle,\n level: thisLevel\n });\n } else if (canClose && isSingle) {\n token.content = replaceAt(token.content, t.index, APOSTROPHE);\n }\n }\n }\n}\n\n\nmodule.exports = function smartquotes(state) {\n /*eslint max-depth:0*/\n var blkIdx;\n\n if (!state.md.options.typographer) { return; }\n\n for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {\n\n if (state.tokens[blkIdx].type !== 'inline' ||\n !QUOTE_TEST_RE.test(state.tokens[blkIdx].content)) {\n continue;\n }\n\n process_inlines(state.tokens[blkIdx].children, state);\n }\n};\n","// Join raw text tokens with the rest of the text\n//\n// This is set as a separate rule to provide an opportunity for plugins\n// to run text replacements after text join, but before escape join.\n//\n// For example, `\\:)` shouldn't be replaced with an emoji.\n//\n'use strict';\n\n\nmodule.exports = function text_join(state) {\n var j, l, tokens, curr, max, last,\n blockTokens = state.tokens;\n\n for (j = 0, l = blockTokens.length; j < l; j++) {\n if (blockTokens[j].type !== 'inline') continue;\n\n tokens = blockTokens[j].children;\n max = tokens.length;\n\n for (curr = 0; curr < max; curr++) {\n if (tokens[curr].type === 'text_special') {\n tokens[curr].type = 'text';\n }\n }\n\n for (curr = last = 0; curr < max; curr++) {\n if (tokens[curr].type === 'text' &&\n curr + 1 < max &&\n tokens[curr + 1].type === 'text') {\n\n // collapse two adjacent text nodes\n tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content;\n } else {\n if (curr !== last) { tokens[last] = tokens[curr]; }\n\n last++;\n }\n }\n\n if (curr !== last) {\n tokens.length = last;\n }\n }\n};\n","// Token class\n\n'use strict';\n\n\n/**\n * class Token\n **/\n\n/**\n * new Token(type, tag, nesting)\n *\n * Create new token and fill passed properties.\n **/\nfunction Token(type, tag, nesting) {\n /**\n * Token#type -> String\n *\n * Type of the token (string, e.g. \"paragraph_open\")\n **/\n this.type = type;\n\n /**\n * Token#tag -> String\n *\n * html tag name, e.g. \"p\"\n **/\n this.tag = tag;\n\n /**\n * Token#attrs -> Array\n *\n * Html attributes. 
Format: `[ [ name1, value1 ], [ name2, value2 ] ]`\n **/\n this.attrs = null;\n\n /**\n * Token#map -> Array\n *\n * Source map info. Format: `[ line_begin, line_end ]`\n **/\n this.map = null;\n\n /**\n * Token#nesting -> Number\n *\n * Level change (number in {-1, 0, 1} set), where:\n *\n * - `1` means the tag is opening\n * - `0` means the tag is self-closing\n * - `-1` means the tag is closing\n **/\n this.nesting = nesting;\n\n /**\n * Token#level -> Number\n *\n * nesting level, the same as `state.level`\n **/\n this.level = 0;\n\n /**\n * Token#children -> Array\n *\n * An array of child nodes (inline and img tokens)\n **/\n this.children = null;\n\n /**\n * Token#content -> String\n *\n * In a case of self-closing tag (code, html, fence, etc.),\n * it has contents of this tag.\n **/\n this.content = '';\n\n /**\n * Token#markup -> String\n *\n * '*' or '_' for emphasis, fence string for fence, etc.\n **/\n this.markup = '';\n\n /**\n * Token#info -> String\n *\n * Additional information:\n *\n * - Info string for \"fence\" tokens\n * - The value \"auto\" for autolink \"link_open\" and \"link_close\" tokens\n * - The string value of the item marker for ordered-list \"list_item_open\" tokens\n **/\n this.info = '';\n\n /**\n * Token#meta -> Object\n *\n * A place for plugins to store an arbitrary data\n **/\n this.meta = null;\n\n /**\n * Token#block -> Boolean\n *\n * True for block-level tokens, false for inline tokens.\n * Used in renderer to calculate line breaks\n **/\n this.block = false;\n\n /**\n * Token#hidden -> Boolean\n *\n * If it's true, ignore this element when rendering. Used for tight lists\n * to hide paragraphs.\n **/\n this.hidden = false;\n}\n\n\n/**\n * Token.attrIndex(name) -> Number\n *\n * Search attribute index by name.\n **/\nToken.prototype.attrIndex = function attrIndex(name) {\n var attrs, i, len;\n\n if (!this.attrs) { return -1; }\n\n attrs = this.attrs;\n\n for (i = 0, len = attrs.length; i < len; i++) {\n if (attrs[i][0] === name) { return i; }\n }\n return -1;\n};\n\n\n/**\n * Token.attrPush(attrData)\n *\n * Add `[ name, value ]` attribute to list. Init attrs if necessary\n **/\nToken.prototype.attrPush = function attrPush(attrData) {\n if (this.attrs) {\n this.attrs.push(attrData);\n } else {\n this.attrs = [ attrData ];\n }\n};\n\n\n/**\n * Token.attrSet(name, value)\n *\n * Set `name` attribute to `value`. Override old value if exists.\n **/\nToken.prototype.attrSet = function attrSet(name, value) {\n var idx = this.attrIndex(name),\n attrData = [ name, value ];\n\n if (idx < 0) {\n this.attrPush(attrData);\n } else {\n this.attrs[idx] = attrData;\n }\n};\n\n\n/**\n * Token.attrGet(name)\n *\n * Get the value of attribute `name`, or null if it does not exist.\n **/\nToken.prototype.attrGet = function attrGet(name) {\n var idx = this.attrIndex(name), value = null;\n if (idx >= 0) {\n value = this.attrs[idx][1];\n }\n return value;\n};\n\n\n/**\n * Token.attrJoin(name, value)\n *\n * Join value to existing attribute via space. Or create new attribute if not\n * exists. 
Useful to operate with token classes.\n **/\nToken.prototype.attrJoin = function attrJoin(name, value) {\n var idx = this.attrIndex(name);\n\n if (idx < 0) {\n this.attrPush([ name, value ]);\n } else {\n this.attrs[idx][1] = this.attrs[idx][1] + ' ' + value;\n }\n};\n\n\nmodule.exports = Token;\n","// Core state object\n//\n'use strict';\n\nvar Token = require('../token');\n\n\nfunction StateCore(src, md, env) {\n this.src = src;\n this.env = env;\n this.tokens = [];\n this.inlineMode = false;\n this.md = md; // link to parser instance\n}\n\n// re-export Token class to use in core rules\nStateCore.prototype.Token = Token;\n\n\nmodule.exports = StateCore;\n","/** internal\n * class Core\n *\n * Top-level rules executor. Glues block/inline parsers and does intermediate\n * transformations.\n **/\n'use strict';\n\n\nvar Ruler = require('./ruler');\n\n\nvar _rules = [\n [ 'normalize', require('./rules_core/normalize') ],\n [ 'block', require('./rules_core/block') ],\n [ 'inline', require('./rules_core/inline') ],\n [ 'linkify', require('./rules_core/linkify') ],\n [ 'replacements', require('./rules_core/replacements') ],\n [ 'smartquotes', require('./rules_core/smartquotes') ],\n // `text_join` finds `text_special` tokens (for escape sequences)\n // and joins them with the rest of the text\n [ 'text_join', require('./rules_core/text_join') ]\n];\n\n\n/**\n * new Core()\n **/\nfunction Core() {\n /**\n * Core#ruler -> Ruler\n *\n * [[Ruler]] instance. Keep configuration of core rules.\n **/\n this.ruler = new Ruler();\n\n for (var i = 0; i < _rules.length; i++) {\n this.ruler.push(_rules[i][0], _rules[i][1]);\n }\n}\n\n\n/**\n * Core.process(state)\n *\n * Executes core chain rules.\n **/\nCore.prototype.process = function (state) {\n var i, l, rules;\n\n rules = this.ruler.getRules('');\n\n for (i = 0, l = rules.length; i < l; i++) {\n rules[i](state);\n }\n};\n\nCore.prototype.State = require('./rules_core/state_core');\n\n\nmodule.exports = Core;\n","// GFM table, https://github.github.com/gfm/#tables-extension-\n\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\n\nfunction getLine(state, line) {\n var pos = state.bMarks[line] + state.tShift[line],\n max = state.eMarks[line];\n\n return state.src.slice(pos, max);\n}\n\nfunction escapedSplit(str) {\n var result = [],\n pos = 0,\n max = str.length,\n ch,\n isEscaped = false,\n lastPos = 0,\n current = '';\n\n ch = str.charCodeAt(pos);\n\n while (pos < max) {\n if (ch === 0x7c/* | */) {\n if (!isEscaped) {\n // pipe separating cells, '|'\n result.push(current + str.substring(lastPos, pos));\n current = '';\n lastPos = pos + 1;\n } else {\n // escaped pipe, '\\|'\n current += str.substring(lastPos, pos - 1);\n lastPos = pos;\n }\n }\n\n isEscaped = (ch === 0x5c/* \\ */);\n pos++;\n\n ch = str.charCodeAt(pos);\n }\n\n result.push(current + str.substring(lastPos));\n\n return result;\n}\n\n\nmodule.exports = function table(state, startLine, endLine, silent) {\n var ch, lineText, pos, i, l, nextLine, columns, columnCount, token,\n aligns, t, tableLines, tbodyLines, oldParentType, terminate,\n terminatorRules, firstCh, secondCh;\n\n // should have at least two lines\n if (startLine + 2 > endLine) { return false; }\n\n nextLine = startLine + 1;\n\n if (state.sCount[nextLine] < state.blkIndent) { return false; }\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[nextLine] - state.blkIndent >= 4) { return false; }\n\n // first character of the second line should be '|', '-', ':',\n // and no 
other characters are allowed but spaces;\n // basically, this is the equivalent of /^[-:|][-:|\\s]*$/ regexp\n\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n if (pos >= state.eMarks[nextLine]) { return false; }\n\n firstCh = state.src.charCodeAt(pos++);\n if (firstCh !== 0x7C/* | */ && firstCh !== 0x2D/* - */ && firstCh !== 0x3A/* : */) { return false; }\n\n if (pos >= state.eMarks[nextLine]) { return false; }\n\n secondCh = state.src.charCodeAt(pos++);\n if (secondCh !== 0x7C/* | */ && secondCh !== 0x2D/* - */ && secondCh !== 0x3A/* : */ && !isSpace(secondCh)) {\n return false;\n }\n\n // if first character is '-', then second character must not be a space\n // (due to parsing ambiguity with list)\n if (firstCh === 0x2D/* - */ && isSpace(secondCh)) { return false; }\n\n while (pos < state.eMarks[nextLine]) {\n ch = state.src.charCodeAt(pos);\n\n if (ch !== 0x7C/* | */ && ch !== 0x2D/* - */ && ch !== 0x3A/* : */ && !isSpace(ch)) { return false; }\n\n pos++;\n }\n\n lineText = getLine(state, startLine + 1);\n\n columns = lineText.split('|');\n aligns = [];\n for (i = 0; i < columns.length; i++) {\n t = columns[i].trim();\n if (!t) {\n // allow empty columns before and after table, but not in between columns;\n // e.g. allow ` |---| `, disallow ` ---||--- `\n if (i === 0 || i === columns.length - 1) {\n continue;\n } else {\n return false;\n }\n }\n\n if (!/^:?-+:?$/.test(t)) { return false; }\n if (t.charCodeAt(t.length - 1) === 0x3A/* : */) {\n aligns.push(t.charCodeAt(0) === 0x3A/* : */ ? 'center' : 'right');\n } else if (t.charCodeAt(0) === 0x3A/* : */) {\n aligns.push('left');\n } else {\n aligns.push('');\n }\n }\n\n lineText = getLine(state, startLine).trim();\n if (lineText.indexOf('|') === -1) { return false; }\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n columns = escapedSplit(lineText);\n if (columns.length && columns[0] === '') columns.shift();\n if (columns.length && columns[columns.length - 1] === '') columns.pop();\n\n // header row will define an amount of columns in the entire table,\n // and align row should be exactly the same (the rest of the rows can differ)\n columnCount = columns.length;\n if (columnCount === 0 || columnCount !== aligns.length) { return false; }\n\n if (silent) { return true; }\n\n oldParentType = state.parentType;\n state.parentType = 'table';\n\n // use 'blockquote' lists for termination because it's\n // the most similar to tables\n terminatorRules = state.md.block.ruler.getRules('blockquote');\n\n token = state.push('table_open', 'table', 1);\n token.map = tableLines = [ startLine, 0 ];\n\n token = state.push('thead_open', 'thead', 1);\n token.map = [ startLine, startLine + 1 ];\n\n token = state.push('tr_open', 'tr', 1);\n token.map = [ startLine, startLine + 1 ];\n\n for (i = 0; i < columns.length; i++) {\n token = state.push('th_open', 'th', 1);\n if (aligns[i]) {\n token.attrs = [ [ 'style', 'text-align:' + aligns[i] ] ];\n }\n\n token = state.push('inline', '', 0);\n token.content = columns[i].trim();\n token.children = [];\n\n token = state.push('th_close', 'th', -1);\n }\n\n token = state.push('tr_close', 'tr', -1);\n token = state.push('thead_close', 'thead', -1);\n\n for (nextLine = startLine + 2; nextLine < endLine; nextLine++) {\n if (state.sCount[nextLine] < state.blkIndent) { break; }\n\n terminate = false;\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) { break; }\n 
lineText = getLine(state, nextLine).trim();\n if (!lineText) { break; }\n if (state.sCount[nextLine] - state.blkIndent >= 4) { break; }\n columns = escapedSplit(lineText);\n if (columns.length && columns[0] === '') columns.shift();\n if (columns.length && columns[columns.length - 1] === '') columns.pop();\n\n if (nextLine === startLine + 2) {\n token = state.push('tbody_open', 'tbody', 1);\n token.map = tbodyLines = [ startLine + 2, 0 ];\n }\n\n token = state.push('tr_open', 'tr', 1);\n token.map = [ nextLine, nextLine + 1 ];\n\n for (i = 0; i < columnCount; i++) {\n token = state.push('td_open', 'td', 1);\n if (aligns[i]) {\n token.attrs = [ [ 'style', 'text-align:' + aligns[i] ] ];\n }\n\n token = state.push('inline', '', 0);\n token.content = columns[i] ? columns[i].trim() : '';\n token.children = [];\n\n token = state.push('td_close', 'td', -1);\n }\n token = state.push('tr_close', 'tr', -1);\n }\n\n if (tbodyLines) {\n token = state.push('tbody_close', 'tbody', -1);\n tbodyLines[1] = nextLine;\n }\n\n token = state.push('table_close', 'table', -1);\n tableLines[1] = nextLine;\n\n state.parentType = oldParentType;\n state.line = nextLine;\n return true;\n};\n","// Code block (4 spaces padded)\n\n'use strict';\n\n\nmodule.exports = function code(state, startLine, endLine/*, silent*/) {\n var nextLine, last, token;\n\n if (state.sCount[startLine] - state.blkIndent < 4) { return false; }\n\n last = nextLine = startLine + 1;\n\n while (nextLine < endLine) {\n if (state.isEmpty(nextLine)) {\n nextLine++;\n continue;\n }\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n nextLine++;\n last = nextLine;\n continue;\n }\n break;\n }\n\n state.line = last;\n\n token = state.push('code_block', 'code', 0);\n token.content = state.getLines(startLine, last, 4 + state.blkIndent, false) + '\\n';\n token.map = [ startLine, state.line ];\n\n return true;\n};\n","// fences (``` lang, ~~~ lang)\n\n'use strict';\n\n\nmodule.exports = function fence(state, startLine, endLine, silent) {\n var marker, len, params, nextLine, mem, token, markup,\n haveEndMarker = false,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine];\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n if (pos + 3 > max) { return false; }\n\n marker = state.src.charCodeAt(pos);\n\n if (marker !== 0x7E/* ~ */ && marker !== 0x60 /* ` */) {\n return false;\n }\n\n // scan marker length\n mem = pos;\n pos = state.skipChars(pos, marker);\n\n len = pos - mem;\n\n if (len < 3) { return false; }\n\n markup = state.src.slice(mem, pos);\n params = state.src.slice(pos, max);\n\n if (marker === 0x60 /* ` */) {\n if (params.indexOf(String.fromCharCode(marker)) >= 0) {\n return false;\n }\n }\n\n // Since start is found, we can report success here in validation mode\n if (silent) { return true; }\n\n // search end of block\n nextLine = startLine;\n\n for (;;) {\n nextLine++;\n if (nextLine >= endLine) {\n // unclosed block should be autoclosed by end of document.\n // also block seems to be autoclosed by end of parent\n break;\n }\n\n pos = mem = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n\n if (pos < max && state.sCount[nextLine] < state.blkIndent) {\n // non-empty line with negative indent should stop the list:\n // - ```\n // test\n break;\n }\n\n if (state.src.charCodeAt(pos) !== marker) { continue; }\n\n if (state.sCount[nextLine] - state.blkIndent >= 4) {\n // closing 
fence should be indented less than 4 spaces\n continue;\n }\n\n pos = state.skipChars(pos, marker);\n\n // closing code fence must be at least as long as the opening one\n if (pos - mem < len) { continue; }\n\n // make sure tail has spaces only\n pos = state.skipSpaces(pos);\n\n if (pos < max) { continue; }\n\n haveEndMarker = true;\n // found!\n break;\n }\n\n // If a fence has heading spaces, they should be removed from its inner block\n len = state.sCount[startLine];\n\n state.line = nextLine + (haveEndMarker ? 1 : 0);\n\n token = state.push('fence', 'code', 0);\n token.info = params;\n token.content = state.getLines(startLine + 1, nextLine, len, true);\n token.markup = markup;\n token.map = [ startLine, state.line ];\n\n return true;\n};\n","// Block quotes\n\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\n\nmodule.exports = function blockquote(state, startLine, endLine, silent) {\n var adjustTab,\n ch,\n i,\n initial,\n l,\n lastLineEmpty,\n lines,\n nextLine,\n offset,\n oldBMarks,\n oldBSCount,\n oldIndent,\n oldParentType,\n oldSCount,\n oldTShift,\n spaceAfterMarker,\n terminate,\n terminatorRules,\n token,\n isOutdented,\n oldLineMax = state.lineMax,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine];\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n // check the block quote marker\n if (state.src.charCodeAt(pos++) !== 0x3E/* > */) { return false; }\n\n // we know that it's going to be a valid blockquote,\n // so no point trying to find the end of it in silent mode\n if (silent) { return true; }\n\n // set offset past spaces and \">\"\n initial = offset = state.sCount[startLine] + 1;\n\n // skip one optional space after '>'\n if (state.src.charCodeAt(pos) === 0x20 /* space */) {\n // ' > test '\n // ^ -- position start of line here:\n pos++;\n initial++;\n offset++;\n adjustTab = false;\n spaceAfterMarker = true;\n } else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {\n spaceAfterMarker = true;\n\n if ((state.bsCount[startLine] + offset) % 4 === 3) {\n // ' >\\t test '\n // ^ -- position start of line here (tab has width===1)\n pos++;\n initial++;\n offset++;\n adjustTab = false;\n } else {\n // ' >\\t test '\n // ^ -- position start of line here + shift bsCount slightly\n // to make extra space appear\n adjustTab = true;\n }\n } else {\n spaceAfterMarker = false;\n }\n\n oldBMarks = [ state.bMarks[startLine] ];\n state.bMarks[startLine] = pos;\n\n while (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (isSpace(ch)) {\n if (ch === 0x09) {\n offset += 4 - (offset + state.bsCount[startLine] + (adjustTab ? 1 : 0)) % 4;\n } else {\n offset++;\n }\n } else {\n break;\n }\n\n pos++;\n }\n\n oldBSCount = [ state.bsCount[startLine] ];\n state.bsCount[startLine] = state.sCount[startLine] + 1 + (spaceAfterMarker ? 1 : 0);\n\n lastLineEmpty = pos >= max;\n\n oldSCount = [ state.sCount[startLine] ];\n state.sCount[startLine] = offset - initial;\n\n oldTShift = [ state.tShift[startLine] ];\n state.tShift[startLine] = pos - state.bMarks[startLine];\n\n terminatorRules = state.md.block.ruler.getRules('blockquote');\n\n oldParentType = state.parentType;\n state.parentType = 'blockquote';\n\n // Search the end of the block\n //\n // Block ends with either:\n // 1. an empty line outside:\n // ```\n // > test\n //\n // ```\n // 2. an empty line inside:\n // ```\n // >\n // test\n // ```\n // 3. 
another tag:\n // ```\n // > test\n // - - -\n // ```\n for (nextLine = startLine + 1; nextLine < endLine; nextLine++) {\n // check if it's outdented, i.e. it's inside list item and indented\n // less than said list item:\n //\n // ```\n // 1. anything\n // > current blockquote\n // 2. checking this line\n // ```\n isOutdented = state.sCount[nextLine] < state.blkIndent;\n\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n\n if (pos >= max) {\n // Case 1: line is not inside the blockquote, and this line is empty.\n break;\n }\n\n if (state.src.charCodeAt(pos++) === 0x3E/* > */ && !isOutdented) {\n // This line is inside the blockquote.\n\n // set offset past spaces and \">\"\n initial = offset = state.sCount[nextLine] + 1;\n\n // skip one optional space after '>'\n if (state.src.charCodeAt(pos) === 0x20 /* space */) {\n // ' > test '\n // ^ -- position start of line here:\n pos++;\n initial++;\n offset++;\n adjustTab = false;\n spaceAfterMarker = true;\n } else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {\n spaceAfterMarker = true;\n\n if ((state.bsCount[nextLine] + offset) % 4 === 3) {\n // ' >\\t test '\n // ^ -- position start of line here (tab has width===1)\n pos++;\n initial++;\n offset++;\n adjustTab = false;\n } else {\n // ' >\\t test '\n // ^ -- position start of line here + shift bsCount slightly\n // to make extra space appear\n adjustTab = true;\n }\n } else {\n spaceAfterMarker = false;\n }\n\n oldBMarks.push(state.bMarks[nextLine]);\n state.bMarks[nextLine] = pos;\n\n while (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (isSpace(ch)) {\n if (ch === 0x09) {\n offset += 4 - (offset + state.bsCount[nextLine] + (adjustTab ? 1 : 0)) % 4;\n } else {\n offset++;\n }\n } else {\n break;\n }\n\n pos++;\n }\n\n lastLineEmpty = pos >= max;\n\n oldBSCount.push(state.bsCount[nextLine]);\n state.bsCount[nextLine] = state.sCount[nextLine] + 1 + (spaceAfterMarker ? 
1 : 0);\n\n oldSCount.push(state.sCount[nextLine]);\n state.sCount[nextLine] = offset - initial;\n\n oldTShift.push(state.tShift[nextLine]);\n state.tShift[nextLine] = pos - state.bMarks[nextLine];\n continue;\n }\n\n // Case 2: line is not inside the blockquote, and the last line was empty.\n if (lastLineEmpty) { break; }\n\n // Case 3: another tag found.\n terminate = false;\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n\n if (terminate) {\n // Quirk to enforce \"hard termination mode\" for paragraphs;\n // normally if you call `tokenize(state, startLine, nextLine)`,\n // paragraphs will look below nextLine for paragraph continuation,\n // but if blockquote is terminated by another tag, they shouldn't\n state.lineMax = nextLine;\n\n if (state.blkIndent !== 0) {\n // state.blkIndent was non-zero, we now set it to zero,\n // so we need to re-calculate all offsets to appear as\n // if indent wasn't changed\n oldBMarks.push(state.bMarks[nextLine]);\n oldBSCount.push(state.bsCount[nextLine]);\n oldTShift.push(state.tShift[nextLine]);\n oldSCount.push(state.sCount[nextLine]);\n state.sCount[nextLine] -= state.blkIndent;\n }\n\n break;\n }\n\n oldBMarks.push(state.bMarks[nextLine]);\n oldBSCount.push(state.bsCount[nextLine]);\n oldTShift.push(state.tShift[nextLine]);\n oldSCount.push(state.sCount[nextLine]);\n\n // A negative indentation means that this is a paragraph continuation\n //\n state.sCount[nextLine] = -1;\n }\n\n oldIndent = state.blkIndent;\n state.blkIndent = 0;\n\n token = state.push('blockquote_open', 'blockquote', 1);\n token.markup = '>';\n token.map = lines = [ startLine, 0 ];\n\n state.md.block.tokenize(state, startLine, nextLine);\n\n token = state.push('blockquote_close', 'blockquote', -1);\n token.markup = '>';\n\n state.lineMax = oldLineMax;\n state.parentType = oldParentType;\n lines[1] = state.line;\n\n // Restore original tShift; this might not be necessary since the parser\n // has already been here, but just to make sure we can do that.\n for (i = 0; i < oldTShift.length; i++) {\n state.bMarks[i + startLine] = oldBMarks[i];\n state.tShift[i + startLine] = oldTShift[i];\n state.sCount[i + startLine] = oldSCount[i];\n state.bsCount[i + startLine] = oldBSCount[i];\n }\n state.blkIndent = oldIndent;\n\n return true;\n};\n","// Horizontal rule\n\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\n\nmodule.exports = function hr(state, startLine, endLine, silent) {\n var marker, cnt, ch, token,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine];\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n marker = state.src.charCodeAt(pos++);\n\n // Check hr marker\n if (marker !== 0x2A/* * */ &&\n marker !== 0x2D/* - */ &&\n marker !== 0x5F/* _ */) {\n return false;\n }\n\n // markers can be mixed with spaces, but there should be at least 3 of them\n\n cnt = 1;\n while (pos < max) {\n ch = state.src.charCodeAt(pos++);\n if (ch !== marker && !isSpace(ch)) { return false; }\n if (ch === marker) { cnt++; }\n }\n\n if (cnt < 3) { return false; }\n\n if (silent) { return true; }\n\n state.line = startLine + 1;\n\n token = state.push('hr', 'hr', 0);\n token.map = [ startLine, state.line ];\n token.markup = Array(cnt + 1).join(String.fromCharCode(marker));\n\n return true;\n};\n","// Lists\n\n'use strict';\n\nvar isSpace = 
require('../common/utils').isSpace;\n\n\n// Search `[-+*][\\n ]`, returns next pos after marker on success\n// or -1 on fail.\nfunction skipBulletListMarker(state, startLine) {\n var marker, pos, max, ch;\n\n pos = state.bMarks[startLine] + state.tShift[startLine];\n max = state.eMarks[startLine];\n\n marker = state.src.charCodeAt(pos++);\n // Check bullet\n if (marker !== 0x2A/* * */ &&\n marker !== 0x2D/* - */ &&\n marker !== 0x2B/* + */) {\n return -1;\n }\n\n if (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (!isSpace(ch)) {\n // \" -test \" - is not a list item\n return -1;\n }\n }\n\n return pos;\n}\n\n// Search `\\d+[.)][\\n ]`, returns next pos after marker on success\n// or -1 on fail.\nfunction skipOrderedListMarker(state, startLine) {\n var ch,\n start = state.bMarks[startLine] + state.tShift[startLine],\n pos = start,\n max = state.eMarks[startLine];\n\n // List marker should have at least 2 chars (digit + dot)\n if (pos + 1 >= max) { return -1; }\n\n ch = state.src.charCodeAt(pos++);\n\n if (ch < 0x30/* 0 */ || ch > 0x39/* 9 */) { return -1; }\n\n for (;;) {\n // EOL -> fail\n if (pos >= max) { return -1; }\n\n ch = state.src.charCodeAt(pos++);\n\n if (ch >= 0x30/* 0 */ && ch <= 0x39/* 9 */) {\n\n // List marker should have no more than 9 digits\n // (prevents integer overflow in browsers)\n if (pos - start >= 10) { return -1; }\n\n continue;\n }\n\n // found valid marker\n if (ch === 0x29/* ) */ || ch === 0x2e/* . */) {\n break;\n }\n\n return -1;\n }\n\n\n if (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (!isSpace(ch)) {\n // \" 1.test \" - is not a list item\n return -1;\n }\n }\n return pos;\n}\n\nfunction markTightParagraphs(state, idx) {\n var i, l,\n level = state.level + 2;\n\n for (i = idx + 2, l = state.tokens.length - 2; i < l; i++) {\n if (state.tokens[i].level === level && state.tokens[i].type === 'paragraph_open') {\n state.tokens[i + 2].hidden = true;\n state.tokens[i].hidden = true;\n i += 2;\n }\n }\n}\n\n\nmodule.exports = function list(state, startLine, endLine, silent) {\n var ch,\n contentStart,\n i,\n indent,\n indentAfterMarker,\n initial,\n isOrdered,\n itemLines,\n l,\n listLines,\n listTokIdx,\n markerCharCode,\n markerValue,\n max,\n nextLine,\n offset,\n oldListIndent,\n oldParentType,\n oldSCount,\n oldTShift,\n oldTight,\n pos,\n posAfterMarker,\n prevEmptyEnd,\n start,\n terminate,\n terminatorRules,\n token,\n isTerminatingParagraph = false,\n tight = true;\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n // Special case:\n // - item 1\n // - item 2\n // - item 3\n // - item 4\n // - this one is a paragraph continuation\n if (state.listIndent >= 0 &&\n state.sCount[startLine] - state.listIndent >= 4 &&\n state.sCount[startLine] < state.blkIndent) {\n return false;\n }\n\n // limit conditions when list can interrupt\n // a paragraph (validation mode only)\n if (silent && state.parentType === 'paragraph') {\n // Next list item should still terminate previous list item;\n //\n // This code can fail if plugins use blkIndent as well as lists,\n // but I hope the spec gets fixed long before that happens.\n //\n if (state.sCount[startLine] >= state.blkIndent) {\n isTerminatingParagraph = true;\n }\n }\n\n // Detect list type and position after marker\n if ((posAfterMarker = skipOrderedListMarker(state, startLine)) >= 0) {\n isOrdered = true;\n start = state.bMarks[startLine] + state.tShift[startLine];\n markerValue = 
Number(state.src.slice(start, posAfterMarker - 1));\n\n // If we're starting a new ordered list right after\n // a paragraph, it should start with 1.\n if (isTerminatingParagraph && markerValue !== 1) return false;\n\n } else if ((posAfterMarker = skipBulletListMarker(state, startLine)) >= 0) {\n isOrdered = false;\n\n } else {\n return false;\n }\n\n // If we're starting a new unordered list right after\n // a paragraph, first line should not be empty.\n if (isTerminatingParagraph) {\n if (state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]) return false;\n }\n\n // We should terminate list on style change. Remember first one to compare.\n markerCharCode = state.src.charCodeAt(posAfterMarker - 1);\n\n // For validation mode we can terminate immediately\n if (silent) { return true; }\n\n // Start list\n listTokIdx = state.tokens.length;\n\n if (isOrdered) {\n token = state.push('ordered_list_open', 'ol', 1);\n if (markerValue !== 1) {\n token.attrs = [ [ 'start', markerValue ] ];\n }\n\n } else {\n token = state.push('bullet_list_open', 'ul', 1);\n }\n\n token.map = listLines = [ startLine, 0 ];\n token.markup = String.fromCharCode(markerCharCode);\n\n //\n // Iterate list items\n //\n\n nextLine = startLine;\n prevEmptyEnd = false;\n terminatorRules = state.md.block.ruler.getRules('list');\n\n oldParentType = state.parentType;\n state.parentType = 'list';\n\n while (nextLine < endLine) {\n pos = posAfterMarker;\n max = state.eMarks[nextLine];\n\n initial = offset = state.sCount[nextLine] + posAfterMarker - (state.bMarks[startLine] + state.tShift[startLine]);\n\n while (pos < max) {\n ch = state.src.charCodeAt(pos);\n\n if (ch === 0x09) {\n offset += 4 - (offset + state.bsCount[nextLine]) % 4;\n } else if (ch === 0x20) {\n offset++;\n } else {\n break;\n }\n\n pos++;\n }\n\n contentStart = pos;\n\n if (contentStart >= max) {\n // trimming space in \"- \\n 3\" case, indent is 1 here\n indentAfterMarker = 1;\n } else {\n indentAfterMarker = offset - initial;\n }\n\n // If we have more than 4 spaces, the indent is 1\n // (the rest is just indented code block)\n if (indentAfterMarker > 4) { indentAfterMarker = 1; }\n\n // \" - test\"\n // ^^^^^ - calculating total length of this thing\n indent = initial + indentAfterMarker;\n\n // Run subparser & write tokens\n token = state.push('list_item_open', 'li', 1);\n token.markup = String.fromCharCode(markerCharCode);\n token.map = itemLines = [ startLine, 0 ];\n if (isOrdered) {\n token.info = state.src.slice(start, posAfterMarker - 1);\n }\n\n // change current state, then restore it after parser subcall\n oldTight = state.tight;\n oldTShift = state.tShift[startLine];\n oldSCount = state.sCount[startLine];\n\n // - example list\n // ^ listIndent position will be here\n // ^ blkIndent position will be here\n //\n oldListIndent = state.listIndent;\n state.listIndent = state.blkIndent;\n state.blkIndent = indent;\n\n state.tight = true;\n state.tShift[startLine] = contentStart - state.bMarks[startLine];\n state.sCount[startLine] = offset;\n\n if (contentStart >= max && state.isEmpty(startLine + 1)) {\n // workaround for this case\n // (list item is empty, list terminates before \"foo\"):\n // ~~~~~~~~\n // -\n //\n // foo\n // ~~~~~~~~\n state.line = Math.min(state.line + 2, endLine);\n } else {\n state.md.block.tokenize(state, startLine, endLine, true);\n }\n\n // If any of list item is tight, mark list as tight\n if (!state.tight || prevEmptyEnd) {\n tight = false;\n }\n // Item become loose if finish with empty line,\n // but we should 
filter last element, because it means list finish\n prevEmptyEnd = (state.line - startLine) > 1 && state.isEmpty(state.line - 1);\n\n state.blkIndent = state.listIndent;\n state.listIndent = oldListIndent;\n state.tShift[startLine] = oldTShift;\n state.sCount[startLine] = oldSCount;\n state.tight = oldTight;\n\n token = state.push('list_item_close', 'li', -1);\n token.markup = String.fromCharCode(markerCharCode);\n\n nextLine = startLine = state.line;\n itemLines[1] = nextLine;\n contentStart = state.bMarks[startLine];\n\n if (nextLine >= endLine) { break; }\n\n //\n // Try to check if list is terminated or continued.\n //\n if (state.sCount[nextLine] < state.blkIndent) { break; }\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { break; }\n\n // fail if terminating block found\n terminate = false;\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n if (terminate) { break; }\n\n // fail if list has another type\n if (isOrdered) {\n posAfterMarker = skipOrderedListMarker(state, nextLine);\n if (posAfterMarker < 0) { break; }\n start = state.bMarks[nextLine] + state.tShift[nextLine];\n } else {\n posAfterMarker = skipBulletListMarker(state, nextLine);\n if (posAfterMarker < 0) { break; }\n }\n\n if (markerCharCode !== state.src.charCodeAt(posAfterMarker - 1)) { break; }\n }\n\n // Finalize list\n if (isOrdered) {\n token = state.push('ordered_list_close', 'ol', -1);\n } else {\n token = state.push('bullet_list_close', 'ul', -1);\n }\n token.markup = String.fromCharCode(markerCharCode);\n\n listLines[1] = nextLine;\n state.line = nextLine;\n\n state.parentType = oldParentType;\n\n // mark paragraphs tight if needed\n if (tight) {\n markTightParagraphs(state, listTokIdx);\n }\n\n return true;\n};\n","'use strict';\n\n\nvar normalizeReference = require('../common/utils').normalizeReference;\nvar isSpace = require('../common/utils').isSpace;\n\n\nmodule.exports = function reference(state, startLine, _endLine, silent) {\n var ch,\n destEndPos,\n destEndLineNo,\n endLine,\n href,\n i,\n l,\n label,\n labelEnd,\n oldParentType,\n res,\n start,\n str,\n terminate,\n terminatorRules,\n title,\n lines = 0,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine],\n nextLine = startLine + 1;\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n if (state.src.charCodeAt(pos) !== 0x5B/* [ */) { return false; }\n\n // Simple check to quickly interrupt scan on [link](url) at the start of line.\n // Can be useful on practice: https://github.com/markdown-it/markdown-it/issues/54\n while (++pos < max) {\n if (state.src.charCodeAt(pos) === 0x5D /* ] */ &&\n state.src.charCodeAt(pos - 1) !== 0x5C/* \\ */) {\n if (pos + 1 === max) { return false; }\n if (state.src.charCodeAt(pos + 1) !== 0x3A/* : */) { return false; }\n break;\n }\n }\n\n endLine = state.lineMax;\n\n // jump line-by-line until empty one or EOF\n terminatorRules = state.md.block.ruler.getRules('reference');\n\n oldParentType = state.parentType;\n state.parentType = 'reference';\n\n for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {\n // this would be a code block normally, but after paragraph\n // it's considered a lazy continuation regardless of what's there\n if (state.sCount[nextLine] - state.blkIndent > 3) { continue; 
}\n\n // quirk for blockquotes, this line should already be checked by that rule\n if (state.sCount[nextLine] < 0) { continue; }\n\n // Some tags can terminate paragraph without empty line.\n terminate = false;\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n if (terminate) { break; }\n }\n\n str = state.getLines(startLine, nextLine, state.blkIndent, false).trim();\n max = str.length;\n\n for (pos = 1; pos < max; pos++) {\n ch = str.charCodeAt(pos);\n if (ch === 0x5B /* [ */) {\n return false;\n } else if (ch === 0x5D /* ] */) {\n labelEnd = pos;\n break;\n } else if (ch === 0x0A /* \\n */) {\n lines++;\n } else if (ch === 0x5C /* \\ */) {\n pos++;\n if (pos < max && str.charCodeAt(pos) === 0x0A) {\n lines++;\n }\n }\n }\n\n if (labelEnd < 0 || str.charCodeAt(labelEnd + 1) !== 0x3A/* : */) { return false; }\n\n // [label]: destination 'title'\n // ^^^ skip optional whitespace here\n for (pos = labelEnd + 2; pos < max; pos++) {\n ch = str.charCodeAt(pos);\n if (ch === 0x0A) {\n lines++;\n } else if (isSpace(ch)) {\n /*eslint no-empty:0*/\n } else {\n break;\n }\n }\n\n // [label]: destination 'title'\n // ^^^^^^^^^^^ parse this\n res = state.md.helpers.parseLinkDestination(str, pos, max);\n if (!res.ok) { return false; }\n\n href = state.md.normalizeLink(res.str);\n if (!state.md.validateLink(href)) { return false; }\n\n pos = res.pos;\n lines += res.lines;\n\n // save cursor state, we could require to rollback later\n destEndPos = pos;\n destEndLineNo = lines;\n\n // [label]: destination 'title'\n // ^^^ skipping those spaces\n start = pos;\n for (; pos < max; pos++) {\n ch = str.charCodeAt(pos);\n if (ch === 0x0A) {\n lines++;\n } else if (isSpace(ch)) {\n /*eslint no-empty:0*/\n } else {\n break;\n }\n }\n\n // [label]: destination 'title'\n // ^^^^^^^ parse this\n res = state.md.helpers.parseLinkTitle(str, pos, max);\n if (pos < max && start !== pos && res.ok) {\n title = res.str;\n pos = res.pos;\n lines += res.lines;\n } else {\n title = '';\n pos = destEndPos;\n lines = destEndLineNo;\n }\n\n // skip trailing spaces until the rest of the line\n while (pos < max) {\n ch = str.charCodeAt(pos);\n if (!isSpace(ch)) { break; }\n pos++;\n }\n\n if (pos < max && str.charCodeAt(pos) !== 0x0A) {\n if (title) {\n // garbage at the end of the line after title,\n // but it could still be a valid reference if we roll back\n title = '';\n pos = destEndPos;\n lines = destEndLineNo;\n while (pos < max) {\n ch = str.charCodeAt(pos);\n if (!isSpace(ch)) { break; }\n pos++;\n }\n }\n }\n\n if (pos < max && str.charCodeAt(pos) !== 0x0A) {\n // garbage at the end of the line\n return false;\n }\n\n label = normalizeReference(str.slice(1, labelEnd));\n if (!label) {\n // CommonMark 0.20 disallows empty labels\n return false;\n }\n\n // Reference can not terminate anything. 
This check is for safety only.\n /*istanbul ignore if*/\n if (silent) { return true; }\n\n if (typeof state.env.references === 'undefined') {\n state.env.references = {};\n }\n if (typeof state.env.references[label] === 'undefined') {\n state.env.references[label] = { title: title, href: href };\n }\n\n state.parentType = oldParentType;\n\n state.line = startLine + lines + 1;\n return true;\n};\n","// List of valid html blocks names, accorting to commonmark spec\n// http://jgm.github.io/CommonMark/spec.html#html-blocks\n\n'use strict';\n\n\nmodule.exports = [\n 'address',\n 'article',\n 'aside',\n 'base',\n 'basefont',\n 'blockquote',\n 'body',\n 'caption',\n 'center',\n 'col',\n 'colgroup',\n 'dd',\n 'details',\n 'dialog',\n 'dir',\n 'div',\n 'dl',\n 'dt',\n 'fieldset',\n 'figcaption',\n 'figure',\n 'footer',\n 'form',\n 'frame',\n 'frameset',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'head',\n 'header',\n 'hr',\n 'html',\n 'iframe',\n 'legend',\n 'li',\n 'link',\n 'main',\n 'menu',\n 'menuitem',\n 'nav',\n 'noframes',\n 'ol',\n 'optgroup',\n 'option',\n 'p',\n 'param',\n 'section',\n 'source',\n 'summary',\n 'table',\n 'tbody',\n 'td',\n 'tfoot',\n 'th',\n 'thead',\n 'title',\n 'tr',\n 'track',\n 'ul'\n];\n","// Regexps to match html elements\n\n'use strict';\n\nvar attr_name = '[a-zA-Z_:][a-zA-Z0-9:._-]*';\n\nvar unquoted = '[^\"\\'=<>`\\\\x00-\\\\x20]+';\nvar single_quoted = \"'[^']*'\";\nvar double_quoted = '\"[^\"]*\"';\n\nvar attr_value = '(?:' + unquoted + '|' + single_quoted + '|' + double_quoted + ')';\n\nvar attribute = '(?:\\\\s+' + attr_name + '(?:\\\\s*=\\\\s*' + attr_value + ')?)';\n\nvar open_tag = '<[A-Za-z][A-Za-z0-9\\\\-]*' + attribute + '*\\\\s*\\\\/?>';\n\nvar close_tag = '<\\\\/[A-Za-z][A-Za-z0-9\\\\-]*\\\\s*>';\nvar comment = '|';\nvar processing = '<[?][\\\\s\\\\S]*?[?]>';\nvar declaration = ']*>';\nvar cdata = '';\n\nvar HTML_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + '|' + comment +\n '|' + processing + '|' + declaration + '|' + cdata + ')');\nvar HTML_OPEN_CLOSE_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + ')');\n\nmodule.exports.HTML_TAG_RE = HTML_TAG_RE;\nmodule.exports.HTML_OPEN_CLOSE_TAG_RE = HTML_OPEN_CLOSE_TAG_RE;\n","// HTML block\n\n'use strict';\n\n\nvar block_names = require('../common/html_blocks');\nvar HTML_OPEN_CLOSE_TAG_RE = require('../common/html_re').HTML_OPEN_CLOSE_TAG_RE;\n\n// An array of opening and corresponding closing sequences for html tags,\n// last argument defines whether it can terminate a paragraph or not\n//\nvar HTML_SEQUENCES = [\n [ /^<(script|pre|style|textarea)(?=(\\s|>|$))/i, /<\\/(script|pre|style|textarea)>/i, true ],\n [ /^/, true ],\n [ /^<\\?/, /\\?>/, true ],\n [ /^/, true ],\n [ /^/, true ],\n [ new RegExp('^?(' + block_names.join('|') + ')(?=(\\\\s|/?>|$))', 'i'), /^$/, true ],\n [ new RegExp(HTML_OPEN_CLOSE_TAG_RE.source + '\\\\s*$'), /^$/, false ]\n];\n\n\nmodule.exports = function html_block(state, startLine, endLine, silent) {\n var i, nextLine, token, lineText,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine];\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n if (!state.md.options.html) { return false; }\n\n if (state.src.charCodeAt(pos) !== 0x3C/* < */) { return false; }\n\n lineText = state.src.slice(pos, max);\n\n for (i = 0; i < HTML_SEQUENCES.length; i++) {\n if (HTML_SEQUENCES[i][0].test(lineText)) { break; }\n }\n\n if (i === 
HTML_SEQUENCES.length) { return false; }\n\n if (silent) {\n // true if this sequence can be a terminator, false otherwise\n return HTML_SEQUENCES[i][2];\n }\n\n nextLine = startLine + 1;\n\n // If we are here - we detected HTML block.\n // Let's roll down till block end.\n if (!HTML_SEQUENCES[i][1].test(lineText)) {\n for (; nextLine < endLine; nextLine++) {\n if (state.sCount[nextLine] < state.blkIndent) { break; }\n\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n lineText = state.src.slice(pos, max);\n\n if (HTML_SEQUENCES[i][1].test(lineText)) {\n if (lineText.length !== 0) { nextLine++; }\n break;\n }\n }\n }\n\n state.line = nextLine;\n\n token = state.push('html_block', '', 0);\n token.map = [ startLine, nextLine ];\n token.content = state.getLines(startLine, nextLine, state.blkIndent, true);\n\n return true;\n};\n","// heading (#, ##, ...)\n\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\n\nmodule.exports = function heading(state, startLine, endLine, silent) {\n var ch, level, tmp, token,\n pos = state.bMarks[startLine] + state.tShift[startLine],\n max = state.eMarks[startLine];\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n ch = state.src.charCodeAt(pos);\n\n if (ch !== 0x23/* # */ || pos >= max) { return false; }\n\n // count heading level\n level = 1;\n ch = state.src.charCodeAt(++pos);\n while (ch === 0x23/* # */ && pos < max && level <= 6) {\n level++;\n ch = state.src.charCodeAt(++pos);\n }\n\n if (level > 6 || (pos < max && !isSpace(ch))) { return false; }\n\n if (silent) { return true; }\n\n // Let's cut tails like ' ### ' from the end of string\n\n max = state.skipSpacesBack(max, pos);\n tmp = state.skipCharsBack(max, 0x23, pos); // #\n if (tmp > pos && isSpace(state.src.charCodeAt(tmp - 1))) {\n max = tmp;\n }\n\n state.line = startLine + 1;\n\n token = state.push('heading_open', 'h' + String(level), 1);\n token.markup = '########'.slice(0, level);\n token.map = [ startLine, state.line ];\n\n token = state.push('inline', '', 0);\n token.content = state.src.slice(pos, max).trim();\n token.map = [ startLine, state.line ];\n token.children = [];\n\n token = state.push('heading_close', 'h' + String(level), -1);\n token.markup = '########'.slice(0, level);\n\n return true;\n};\n","// lheading (---, ===)\n\n'use strict';\n\n\nmodule.exports = function lheading(state, startLine, endLine/*, silent*/) {\n var content, terminate, i, l, token, pos, max, level, marker,\n nextLine = startLine + 1, oldParentType,\n terminatorRules = state.md.block.ruler.getRules('paragraph');\n\n // if it's indented more than 3 spaces, it should be a code block\n if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }\n\n oldParentType = state.parentType;\n state.parentType = 'paragraph'; // use paragraph to match terminatorRules\n\n // jump line-by-line until empty one or EOF\n for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {\n // this would be a code block normally, but after paragraph\n // it's considered a lazy continuation regardless of what's there\n if (state.sCount[nextLine] - state.blkIndent > 3) { continue; }\n\n //\n // Check for underline in setext header\n //\n if (state.sCount[nextLine] >= state.blkIndent) {\n pos = state.bMarks[nextLine] + state.tShift[nextLine];\n max = state.eMarks[nextLine];\n\n if (pos < max) {\n marker = state.src.charCodeAt(pos);\n\n if (marker === 0x2D/* - */ || marker 
=== 0x3D/* = */) {\n pos = state.skipChars(pos, marker);\n pos = state.skipSpaces(pos);\n\n if (pos >= max) {\n level = (marker === 0x3D/* = */ ? 1 : 2);\n break;\n }\n }\n }\n }\n\n // quirk for blockquotes, this line should already be checked by that rule\n if (state.sCount[nextLine] < 0) { continue; }\n\n // Some tags can terminate paragraph without empty line.\n terminate = false;\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n if (terminate) { break; }\n }\n\n if (!level) {\n // Didn't find valid underline\n return false;\n }\n\n content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();\n\n state.line = nextLine + 1;\n\n token = state.push('heading_open', 'h' + String(level), 1);\n token.markup = String.fromCharCode(marker);\n token.map = [ startLine, state.line ];\n\n token = state.push('inline', '', 0);\n token.content = content;\n token.map = [ startLine, state.line - 1 ];\n token.children = [];\n\n token = state.push('heading_close', 'h' + String(level), -1);\n token.markup = String.fromCharCode(marker);\n\n state.parentType = oldParentType;\n\n return true;\n};\n","// Paragraph\n\n'use strict';\n\n\nmodule.exports = function paragraph(state, startLine/*, endLine*/) {\n var content, terminate, i, l, token, oldParentType,\n nextLine = startLine + 1,\n terminatorRules = state.md.block.ruler.getRules('paragraph'),\n endLine = state.lineMax;\n\n oldParentType = state.parentType;\n state.parentType = 'paragraph';\n\n // jump line-by-line until empty one or EOF\n for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {\n // this would be a code block normally, but after paragraph\n // it's considered a lazy continuation regardless of what's there\n if (state.sCount[nextLine] - state.blkIndent > 3) { continue; }\n\n // quirk for blockquotes, this line should already be checked by that rule\n if (state.sCount[nextLine] < 0) { continue; }\n\n // Some tags can terminate paragraph without empty line.\n terminate = false;\n for (i = 0, l = terminatorRules.length; i < l; i++) {\n if (terminatorRules[i](state, nextLine, endLine, true)) {\n terminate = true;\n break;\n }\n }\n if (terminate) { break; }\n }\n\n content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();\n\n state.line = nextLine;\n\n token = state.push('paragraph_open', 'p', 1);\n token.map = [ startLine, state.line ];\n\n token = state.push('inline', '', 0);\n token.content = content;\n token.map = [ startLine, state.line ];\n token.children = [];\n\n token = state.push('paragraph_close', 'p', -1);\n\n state.parentType = oldParentType;\n\n return true;\n};\n","// Parser state class\n\n'use strict';\n\nvar Token = require('../token');\nvar isSpace = require('../common/utils').isSpace;\n\n\nfunction StateBlock(src, md, env, tokens) {\n var ch, s, start, pos, len, indent, offset, indent_found;\n\n this.src = src;\n\n // link to parser instance\n this.md = md;\n\n this.env = env;\n\n //\n // Internal state vartiables\n //\n\n this.tokens = tokens;\n\n this.bMarks = []; // line begin offsets for fast jumps\n this.eMarks = []; // line end offsets for fast jumps\n this.tShift = []; // offsets of the first non-space characters (tabs not expanded)\n this.sCount = []; // indents for each line (tabs expanded)\n\n // An amount of virtual spaces (tabs expanded) between beginning\n // of each line (bMarks) and real beginning of that line.\n //\n // It exists only as a hack because 
blockquotes override bMarks\n // losing information in the process.\n //\n // It's used only when expanding tabs, you can think about it as\n // an initial tab length, e.g. bsCount=21 applied to string `\\t123`\n // means first tab should be expanded to 4-21%4 === 3 spaces.\n //\n this.bsCount = [];\n\n // block parser variables\n this.blkIndent = 0; // required block content indent (for example, if we are\n // inside a list, it would be positioned after list marker)\n this.line = 0; // line index in src\n this.lineMax = 0; // lines count\n this.tight = false; // loose/tight mode for lists\n this.ddIndent = -1; // indent of the current dd block (-1 if there isn't any)\n this.listIndent = -1; // indent of the current list block (-1 if there isn't any)\n\n // can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'\n // used in lists to determine if they interrupt a paragraph\n this.parentType = 'root';\n\n this.level = 0;\n\n // renderer\n this.result = '';\n\n // Create caches\n // Generate markers.\n s = this.src;\n indent_found = false;\n\n for (start = pos = indent = offset = 0, len = s.length; pos < len; pos++) {\n ch = s.charCodeAt(pos);\n\n if (!indent_found) {\n if (isSpace(ch)) {\n indent++;\n\n if (ch === 0x09) {\n offset += 4 - offset % 4;\n } else {\n offset++;\n }\n continue;\n } else {\n indent_found = true;\n }\n }\n\n if (ch === 0x0A || pos === len - 1) {\n if (ch !== 0x0A) { pos++; }\n this.bMarks.push(start);\n this.eMarks.push(pos);\n this.tShift.push(indent);\n this.sCount.push(offset);\n this.bsCount.push(0);\n\n indent_found = false;\n indent = 0;\n offset = 0;\n start = pos + 1;\n }\n }\n\n // Push fake entry to simplify cache bounds checks\n this.bMarks.push(s.length);\n this.eMarks.push(s.length);\n this.tShift.push(0);\n this.sCount.push(0);\n this.bsCount.push(0);\n\n this.lineMax = this.bMarks.length - 1; // don't count last fake line\n}\n\n// Push new token to \"stream\".\n//\nStateBlock.prototype.push = function (type, tag, nesting) {\n var token = new Token(type, tag, nesting);\n token.block = true;\n\n if (nesting < 0) this.level--; // closing tag\n token.level = this.level;\n if (nesting > 0) this.level++; // opening tag\n\n this.tokens.push(token);\n return token;\n};\n\nStateBlock.prototype.isEmpty = function isEmpty(line) {\n return this.bMarks[line] + this.tShift[line] >= this.eMarks[line];\n};\n\nStateBlock.prototype.skipEmptyLines = function skipEmptyLines(from) {\n for (var max = this.lineMax; from < max; from++) {\n if (this.bMarks[from] + this.tShift[from] < this.eMarks[from]) {\n break;\n }\n }\n return from;\n};\n\n// Skip spaces from given position.\nStateBlock.prototype.skipSpaces = function skipSpaces(pos) {\n var ch;\n\n for (var max = this.src.length; pos < max; pos++) {\n ch = this.src.charCodeAt(pos);\n if (!isSpace(ch)) { break; }\n }\n return pos;\n};\n\n// Skip spaces from given position in reverse.\nStateBlock.prototype.skipSpacesBack = function skipSpacesBack(pos, min) {\n if (pos <= min) { return pos; }\n\n while (pos > min) {\n if (!isSpace(this.src.charCodeAt(--pos))) { return pos + 1; }\n }\n return pos;\n};\n\n// Skip char codes from given position\nStateBlock.prototype.skipChars = function skipChars(pos, code) {\n for (var max = this.src.length; pos < max; pos++) {\n if (this.src.charCodeAt(pos) !== code) { break; }\n }\n return pos;\n};\n\n// Skip char codes reverse from given position - 1\nStateBlock.prototype.skipCharsBack = function skipCharsBack(pos, code, min) {\n if (pos <= min) { return pos; }\n\n while (pos > min) 
{\n if (code !== this.src.charCodeAt(--pos)) { return pos + 1; }\n }\n return pos;\n};\n\n// cut lines range from source.\nStateBlock.prototype.getLines = function getLines(begin, end, indent, keepLastLF) {\n var i, lineIndent, ch, first, last, queue, lineStart,\n line = begin;\n\n if (begin >= end) {\n return '';\n }\n\n queue = new Array(end - begin);\n\n for (i = 0; line < end; line++, i++) {\n lineIndent = 0;\n lineStart = first = this.bMarks[line];\n\n if (line + 1 < end || keepLastLF) {\n // No need for bounds check because we have fake entry on tail.\n last = this.eMarks[line] + 1;\n } else {\n last = this.eMarks[line];\n }\n\n while (first < last && lineIndent < indent) {\n ch = this.src.charCodeAt(first);\n\n if (isSpace(ch)) {\n if (ch === 0x09) {\n lineIndent += 4 - (lineIndent + this.bsCount[line]) % 4;\n } else {\n lineIndent++;\n }\n } else if (first - lineStart < this.tShift[line]) {\n // patched tShift masked characters to look like spaces (blockquotes, list markers)\n lineIndent++;\n } else {\n break;\n }\n\n first++;\n }\n\n if (lineIndent > indent) {\n // partially expanding tabs in code blocks, e.g '\\t\\tfoobar'\n // with indent=2 becomes ' \\tfoobar'\n queue[i] = new Array(lineIndent - indent + 1).join(' ') + this.src.slice(first, last);\n } else {\n queue[i] = this.src.slice(first, last);\n }\n }\n\n return queue.join('');\n};\n\n// re-export Token class to use in block rules\nStateBlock.prototype.Token = Token;\n\n\nmodule.exports = StateBlock;\n","/** internal\n * class ParserBlock\n *\n * Block-level tokenizer.\n **/\n'use strict';\n\n\nvar Ruler = require('./ruler');\n\n\nvar _rules = [\n // First 2 params - rule name & source. Secondary array - list of rules,\n // which can be terminated by this one.\n [ 'table', require('./rules_block/table'), [ 'paragraph', 'reference' ] ],\n [ 'code', require('./rules_block/code') ],\n [ 'fence', require('./rules_block/fence'), [ 'paragraph', 'reference', 'blockquote', 'list' ] ],\n [ 'blockquote', require('./rules_block/blockquote'), [ 'paragraph', 'reference', 'blockquote', 'list' ] ],\n [ 'hr', require('./rules_block/hr'), [ 'paragraph', 'reference', 'blockquote', 'list' ] ],\n [ 'list', require('./rules_block/list'), [ 'paragraph', 'reference', 'blockquote' ] ],\n [ 'reference', require('./rules_block/reference') ],\n [ 'html_block', require('./rules_block/html_block'), [ 'paragraph', 'reference', 'blockquote' ] ],\n [ 'heading', require('./rules_block/heading'), [ 'paragraph', 'reference', 'blockquote' ] ],\n [ 'lheading', require('./rules_block/lheading') ],\n [ 'paragraph', require('./rules_block/paragraph') ]\n];\n\n\n/**\n * new ParserBlock()\n **/\nfunction ParserBlock() {\n /**\n * ParserBlock#ruler -> Ruler\n *\n * [[Ruler]] instance. 
Keep configuration of block rules.\n **/\n this.ruler = new Ruler();\n\n for (var i = 0; i < _rules.length; i++) {\n this.ruler.push(_rules[i][0], _rules[i][1], { alt: (_rules[i][2] || []).slice() });\n }\n}\n\n\n// Generate tokens for input range\n//\nParserBlock.prototype.tokenize = function (state, startLine, endLine) {\n var ok, i,\n rules = this.ruler.getRules(''),\n len = rules.length,\n line = startLine,\n hasEmptyLines = false,\n maxNesting = state.md.options.maxNesting;\n\n while (line < endLine) {\n state.line = line = state.skipEmptyLines(line);\n if (line >= endLine) { break; }\n\n // Termination condition for nested calls.\n // Nested calls currently used for blockquotes & lists\n if (state.sCount[line] < state.blkIndent) { break; }\n\n // If nesting level exceeded - skip tail to the end. That's not ordinary\n // situation and we should not care about content.\n if (state.level >= maxNesting) {\n state.line = endLine;\n break;\n }\n\n // Try all possible rules.\n // On success, rule should:\n //\n // - update `state.line`\n // - update `state.tokens`\n // - return true\n\n for (i = 0; i < len; i++) {\n ok = rules[i](state, line, endLine, false);\n if (ok) { break; }\n }\n\n // set state.tight if we had an empty line before current tag\n // i.e. latest empty line should not count\n state.tight = !hasEmptyLines;\n\n // paragraph might \"eat\" one newline after it in nested lists\n if (state.isEmpty(state.line - 1)) {\n hasEmptyLines = true;\n }\n\n line = state.line;\n\n if (line < endLine && state.isEmpty(line)) {\n hasEmptyLines = true;\n line++;\n state.line = line;\n }\n }\n};\n\n\n/**\n * ParserBlock.parse(str, md, env, outTokens)\n *\n * Process input string and push block tokens into `outTokens`\n **/\nParserBlock.prototype.parse = function (src, md, env, outTokens) {\n var state;\n\n if (!src) { return; }\n\n state = new this.State(src, md, env, outTokens);\n\n this.tokenize(state, state.line, state.lineMax);\n};\n\n\nParserBlock.prototype.State = require('./rules_block/state_block');\n\n\nmodule.exports = ParserBlock;\n","// Skip text characters for text token, place those to pending buffer\n// and increment current pos\n\n'use strict';\n\n\n// Rule to skip pure text\n// '{}$%@~+=:' reserved for extentions\n\n// !, \", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \\, ], ^, _, `, {, |, }, or ~\n\n// !!!! Don't confuse with \"Markdown ASCII Punctuation\" chars\n// http://spec.commonmark.org/0.15/#ascii-punctuation-character\nfunction isTerminatorChar(ch) {\n switch (ch) {\n case 0x0A/* \\n */:\n case 0x21/* ! */:\n case 0x23/* # */:\n case 0x24/* $ */:\n case 0x25/* % */:\n case 0x26/* & */:\n case 0x2A/* * */:\n case 0x2B/* + */:\n case 0x2D/* - */:\n case 0x3A/* : */:\n case 0x3C/* < */:\n case 0x3D/* = */:\n case 0x3E/* > */:\n case 0x40/* @ */:\n case 0x5B/* [ */:\n case 0x5C/* \\ */:\n case 0x5D/* ] */:\n case 0x5E/* ^ */:\n case 0x5F/* _ */:\n case 0x60/* ` */:\n case 0x7B/* { */:\n case 0x7D/* } */:\n case 0x7E/* ~ */:\n return true;\n default:\n return false;\n }\n}\n\nmodule.exports = function text(state, silent) {\n var pos = state.pos;\n\n while (pos < state.posMax && !isTerminatorChar(state.src.charCodeAt(pos))) {\n pos++;\n }\n\n if (pos === state.pos) { return false; }\n\n if (!silent) { state.pending += state.src.slice(state.pos, pos); }\n\n state.pos = pos;\n\n return true;\n};\n\n// Alternative implementation, for memory.\n//\n// It costs 10% of performance, but allows extend terminators list, if place it\n// to `ParcerInline` property. 
Probably, will switch to it sometime, such\n// flexibility required.\n\n/*\nvar TERMINATOR_RE = /[\\n!#$%&*+\\-:<=>@[\\\\\\]^_`{}~]/;\n\nmodule.exports = function text(state, silent) {\n var pos = state.pos,\n idx = state.src.slice(pos).search(TERMINATOR_RE);\n\n // first char is terminator -> empty text\n if (idx === 0) { return false; }\n\n // no terminator -> text till end of string\n if (idx < 0) {\n if (!silent) { state.pending += state.src.slice(pos); }\n state.pos = state.src.length;\n return true;\n }\n\n if (!silent) { state.pending += state.src.slice(pos, pos + idx); }\n\n state.pos += idx;\n\n return true;\n};*/\n","// Process links like https://example.org/\n\n'use strict';\n\n\n// RFC3986: scheme = ALPHA *( ALPHA / DIGIT / \"+\" / \"-\" / \".\" )\nvar SCHEME_RE = /(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$/i;\n\n\nmodule.exports = function linkify(state, silent) {\n var pos, max, match, proto, link, url, fullUrl, token;\n\n if (!state.md.options.linkify) return false;\n if (state.linkLevel > 0) return false;\n\n pos = state.pos;\n max = state.posMax;\n\n if (pos + 3 > max) return false;\n if (state.src.charCodeAt(pos) !== 0x3A/* : */) return false;\n if (state.src.charCodeAt(pos + 1) !== 0x2F/* / */) return false;\n if (state.src.charCodeAt(pos + 2) !== 0x2F/* / */) return false;\n\n match = state.pending.match(SCHEME_RE);\n if (!match) return false;\n\n proto = match[1];\n\n link = state.md.linkify.matchAtStart(state.src.slice(pos - proto.length));\n if (!link) return false;\n\n url = link.url;\n\n // disallow '*' at the end of the link (conflicts with emphasis)\n url = url.replace(/\\*+$/, '');\n\n fullUrl = state.md.normalizeLink(url);\n if (!state.md.validateLink(fullUrl)) return false;\n\n if (!silent) {\n state.pending = state.pending.slice(0, -proto.length);\n\n token = state.push('link_open', 'a', 1);\n token.attrs = [ [ 'href', fullUrl ] ];\n token.markup = 'linkify';\n token.info = 'auto';\n\n token = state.push('text', '', 0);\n token.content = state.md.normalizeLinkText(url);\n\n token = state.push('link_close', 'a', -1);\n token.markup = 'linkify';\n token.info = 'auto';\n }\n\n state.pos += url.length - proto.length;\n return true;\n};\n","// Proceess '\\n'\n\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\n\nmodule.exports = function newline(state, silent) {\n var pmax, max, ws, pos = state.pos;\n\n if (state.src.charCodeAt(pos) !== 0x0A/* \\n */) { return false; }\n\n pmax = state.pending.length - 1;\n max = state.posMax;\n\n // ' \\n' -> hardbreak\n // Lookup in pending chars is bad practice! 
Don't copy to other rules!\n // Pending string is stored in concat mode, indexed lookups will cause\n // convertion to flat mode.\n if (!silent) {\n if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) {\n if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) {\n // Find whitespaces tail of pending chars.\n ws = pmax - 1;\n while (ws >= 1 && state.pending.charCodeAt(ws - 1) === 0x20) ws--;\n\n state.pending = state.pending.slice(0, ws);\n state.push('hardbreak', 'br', 0);\n } else {\n state.pending = state.pending.slice(0, -1);\n state.push('softbreak', 'br', 0);\n }\n\n } else {\n state.push('softbreak', 'br', 0);\n }\n }\n\n pos++;\n\n // skip heading spaces for next line\n while (pos < max && isSpace(state.src.charCodeAt(pos))) { pos++; }\n\n state.pos = pos;\n return true;\n};\n","// Process escaped chars and hardbreaks\n\n'use strict';\n\nvar isSpace = require('../common/utils').isSpace;\n\nvar ESCAPED = [];\n\nfor (var i = 0; i < 256; i++) { ESCAPED.push(0); }\n\n'\\\\!\"#$%&\\'()*+,./:;<=>?@[]^_`{|}~-'\n .split('').forEach(function (ch) { ESCAPED[ch.charCodeAt(0)] = 1; });\n\n\nmodule.exports = function escape(state, silent) {\n var ch1, ch2, origStr, escapedStr, token, pos = state.pos, max = state.posMax;\n\n if (state.src.charCodeAt(pos) !== 0x5C/* \\ */) return false;\n pos++;\n\n // '\\' at the end of the inline block\n if (pos >= max) return false;\n\n ch1 = state.src.charCodeAt(pos);\n\n if (ch1 === 0x0A) {\n if (!silent) {\n state.push('hardbreak', 'br', 0);\n }\n\n pos++;\n // skip leading whitespaces from next line\n while (pos < max) {\n ch1 = state.src.charCodeAt(pos);\n if (!isSpace(ch1)) break;\n pos++;\n }\n\n state.pos = pos;\n return true;\n }\n\n escapedStr = state.src[pos];\n\n if (ch1 >= 0xD800 && ch1 <= 0xDBFF && pos + 1 < max) {\n ch2 = state.src.charCodeAt(pos + 1);\n\n if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {\n escapedStr += state.src[pos + 1];\n pos++;\n }\n }\n\n origStr = '\\\\' + escapedStr;\n\n if (!silent) {\n token = state.push('text_special', '', 0);\n\n if (ch1 < 256 && ESCAPED[ch1] !== 0) {\n token.content = escapedStr;\n } else {\n token.content = origStr;\n }\n\n token.markup = origStr;\n token.info = 'escape';\n }\n\n state.pos = pos + 1;\n return true;\n};\n","// Parse backticks\n\n'use strict';\n\n\nmodule.exports = function backtick(state, silent) {\n var start, max, marker, token, matchStart, matchEnd, openerLength, closerLength,\n pos = state.pos,\n ch = state.src.charCodeAt(pos);\n\n if (ch !== 0x60/* ` */) { return false; }\n\n start = pos;\n pos++;\n max = state.posMax;\n\n // scan marker length\n while (pos < max && state.src.charCodeAt(pos) === 0x60/* ` */) { pos++; }\n\n marker = state.src.slice(start, pos);\n openerLength = marker.length;\n\n if (state.backticksScanned && (state.backticks[openerLength] || 0) <= start) {\n if (!silent) state.pending += marker;\n state.pos += openerLength;\n return true;\n }\n\n matchStart = matchEnd = pos;\n\n // Nothing found in the cache, scan until the end of the line (or until marker is found)\n while ((matchStart = state.src.indexOf('`', matchEnd)) !== -1) {\n matchEnd = matchStart + 1;\n\n // scan marker length\n while (matchEnd < max && state.src.charCodeAt(matchEnd) === 0x60/* ` */) { matchEnd++; }\n\n closerLength = matchEnd - matchStart;\n\n if (closerLength === openerLength) {\n // Found matching closer length.\n if (!silent) {\n token = state.push('code_inline', 'code', 0);\n token.markup = marker;\n token.content = state.src.slice(pos, matchStart)\n .replace(/\\n/g, ' ')\n 
.replace(/^ (.+) $/, '$1');\n }\n state.pos = matchEnd;\n return true;\n }\n\n // Some different length found, put it in cache as upper limit of where closer can be found\n state.backticks[closerLength] = matchStart;\n }\n\n // Scanned through the end, didn't find anything\n state.backticksScanned = true;\n\n if (!silent) state.pending += marker;\n state.pos += openerLength;\n return true;\n};\n","// ~~strike through~~\n//\n'use strict';\n\n\n// Insert each marker as a separate text token, and add it to delimiter list\n//\nmodule.exports.tokenize = function strikethrough(state, silent) {\n var i, scanned, token, len, ch,\n start = state.pos,\n marker = state.src.charCodeAt(start);\n\n if (silent) { return false; }\n\n if (marker !== 0x7E/* ~ */) { return false; }\n\n scanned = state.scanDelims(state.pos, true);\n len = scanned.length;\n ch = String.fromCharCode(marker);\n\n if (len < 2) { return false; }\n\n if (len % 2) {\n token = state.push('text', '', 0);\n token.content = ch;\n len--;\n }\n\n for (i = 0; i < len; i += 2) {\n token = state.push('text', '', 0);\n token.content = ch + ch;\n\n state.delimiters.push({\n marker: marker,\n length: 0, // disable \"rule of 3\" length checks meant for emphasis\n token: state.tokens.length - 1,\n end: -1,\n open: scanned.can_open,\n close: scanned.can_close\n });\n }\n\n state.pos += scanned.length;\n\n return true;\n};\n\n\nfunction postProcess(state, delimiters) {\n var i, j,\n startDelim,\n endDelim,\n token,\n loneMarkers = [],\n max = delimiters.length;\n\n for (i = 0; i < max; i++) {\n startDelim = delimiters[i];\n\n if (startDelim.marker !== 0x7E/* ~ */) {\n continue;\n }\n\n if (startDelim.end === -1) {\n continue;\n }\n\n endDelim = delimiters[startDelim.end];\n\n token = state.tokens[startDelim.token];\n token.type = 's_open';\n token.tag = 's';\n token.nesting = 1;\n token.markup = '~~';\n token.content = '';\n\n token = state.tokens[endDelim.token];\n token.type = 's_close';\n token.tag = 's';\n token.nesting = -1;\n token.markup = '~~';\n token.content = '';\n\n if (state.tokens[endDelim.token - 1].type === 'text' &&\n state.tokens[endDelim.token - 1].content === '~') {\n\n loneMarkers.push(endDelim.token - 1);\n }\n }\n\n // If a marker sequence has an odd number of characters, it's splitted\n // like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the\n // start of the sequence.\n //\n // So, we have to move all those markers after subsequent s_close tags.\n //\n while (loneMarkers.length) {\n i = loneMarkers.pop();\n j = i + 1;\n\n while (j < state.tokens.length && state.tokens[j].type === 's_close') {\n j++;\n }\n\n j--;\n\n if (i !== j) {\n token = state.tokens[j];\n state.tokens[j] = state.tokens[i];\n state.tokens[i] = token;\n }\n }\n}\n\n\n// Walk through delimiter list and replace text tokens with tags\n//\nmodule.exports.postProcess = function strikethrough(state) {\n var curr,\n tokens_meta = state.tokens_meta,\n max = state.tokens_meta.length;\n\n postProcess(state, state.delimiters);\n\n for (curr = 0; curr < max; curr++) {\n if (tokens_meta[curr] && tokens_meta[curr].delimiters) {\n postProcess(state, tokens_meta[curr].delimiters);\n }\n }\n};\n","// Process *this* and _that_\n//\n'use strict';\n\n\n// Insert each marker as a separate text token, and add it to delimiter list\n//\nmodule.exports.tokenize = function emphasis(state, silent) {\n var i, scanned, token,\n start = state.pos,\n marker = state.src.charCodeAt(start);\n\n if (silent) { return false; }\n\n if (marker !== 0x5F /* _ */ && marker !== 0x2A 
/* * */) { return false; }\n\n scanned = state.scanDelims(state.pos, marker === 0x2A);\n\n for (i = 0; i < scanned.length; i++) {\n token = state.push('text', '', 0);\n token.content = String.fromCharCode(marker);\n\n state.delimiters.push({\n // Char code of the starting marker (number).\n //\n marker: marker,\n\n // Total length of these series of delimiters.\n //\n length: scanned.length,\n\n // A position of the token this delimiter corresponds to.\n //\n token: state.tokens.length - 1,\n\n // If this delimiter is matched as a valid opener, `end` will be\n // equal to its position, otherwise it's `-1`.\n //\n end: -1,\n\n // Boolean flags that determine if this delimiter could open or close\n // an emphasis.\n //\n open: scanned.can_open,\n close: scanned.can_close\n });\n }\n\n state.pos += scanned.length;\n\n return true;\n};\n\n\nfunction postProcess(state, delimiters) {\n var i,\n startDelim,\n endDelim,\n token,\n ch,\n isStrong,\n max = delimiters.length;\n\n for (i = max - 1; i >= 0; i--) {\n startDelim = delimiters[i];\n\n if (startDelim.marker !== 0x5F/* _ */ && startDelim.marker !== 0x2A/* * */) {\n continue;\n }\n\n // Process only opening markers\n if (startDelim.end === -1) {\n continue;\n }\n\n endDelim = delimiters[startDelim.end];\n\n // If the previous delimiter has the same marker and is adjacent to this one,\n // merge those into one strong delimiter.\n //\n // `whatever` -> `whatever`\n //\n isStrong = i > 0 &&\n delimiters[i - 1].end === startDelim.end + 1 &&\n // check that first two markers match and adjacent\n delimiters[i - 1].marker === startDelim.marker &&\n delimiters[i - 1].token === startDelim.token - 1 &&\n // check that last two markers are adjacent (we can safely assume they match)\n delimiters[startDelim.end + 1].token === endDelim.token + 1;\n\n ch = String.fromCharCode(startDelim.marker);\n\n token = state.tokens[startDelim.token];\n token.type = isStrong ? 'strong_open' : 'em_open';\n token.tag = isStrong ? 'strong' : 'em';\n token.nesting = 1;\n token.markup = isStrong ? ch + ch : ch;\n token.content = '';\n\n token = state.tokens[endDelim.token];\n token.type = isStrong ? 'strong_close' : 'em_close';\n token.tag = isStrong ? 'strong' : 'em';\n token.nesting = -1;\n token.markup = isStrong ? ch + ch : ch;\n token.content = '';\n\n if (isStrong) {\n state.tokens[delimiters[i - 1].token].content = '';\n state.tokens[delimiters[startDelim.end + 1].token].content = '';\n i--;\n }\n }\n}\n\n\n// Walk through delimiter list and replace text tokens with tags\n//\nmodule.exports.postProcess = function emphasis(state) {\n var curr,\n tokens_meta = state.tokens_meta,\n max = state.tokens_meta.length;\n\n postProcess(state, state.delimiters);\n\n for (curr = 0; curr < max; curr++) {\n if (tokens_meta[curr] && tokens_meta[curr].delimiters) {\n postProcess(state, tokens_meta[curr].delimiters);\n }\n }\n};\n","// Process [link](
)\n breaks: false, // Convert '\\n' in paragraphs into <br>
\n langPrefix: 'language-', // CSS language prefix for fenced blocks\n linkify: false, // autoconvert URL-like texts to links\n\n // Enable some language-neutral replacements + quotes beautification\n typographer: false,\n\n // Double + single quotes replacement pairs, when typographer enabled,\n // and smartquotes on. Could be either a String or an Array.\n //\n // For example, you can use '«»„“' for Russian, '„“‚‘' for German,\n // and ['«\\xA0', '\\xA0»', '‹\\xA0', '\\xA0›'] for French (including nbsp).\n quotes: '\\u201c\\u201d\\u2018\\u2019', /* “”‘’ */\n\n // Highlighter function. Should return escaped HTML,\n // or '' if the source string is not changed and should be escaped externaly.\n // If result starts with
\n langPrefix: 'language-', // CSS language prefix for fenced blocks\n linkify: false, // autoconvert URL-like texts to links\n\n // Enable some language-neutral replacements + quotes beautification\n typographer: false,\n\n // Double + single quotes replacement pairs, when typographer enabled,\n // and smartquotes on. Could be either a String or an Array.\n //\n // For example, you can use '«»„“' for Russian, '„“‚‘' for German,\n // and ['«\\xA0', '\\xA0»', '‹\\xA0', '\\xA0›'] for French (including nbsp).\n quotes: '\\u201c\\u201d\\u2018\\u2019', /* “”‘’ */\n\n // Highlighter function. Should return escaped HTML,\n // or '' if the source string is not changed and should be escaped externaly.\n // If result starts with
\n langPrefix: 'language-', // CSS language prefix for fenced blocks\n linkify: false, // autoconvert URL-like texts to links\n\n // Enable some language-neutral replacements + quotes beautification\n typographer: false,\n\n // Double + single quotes replacement pairs, when typographer enabled,\n // and smartquotes on. Could be either a String or an Array.\n //\n // For example, you can use '«»„“' for Russian, '„“‚‘' for German,\n // and ['«\\xA0', '\\xA0»', '‹\\xA0', '\\xA0›'] for French (including nbsp).\n quotes: '\\u201c\\u201d\\u2018\\u2019', /* “”‘’ */\n\n // Highlighter function. Should return escaped HTML,\n // or '' if the source string is not changed and should be escaped externaly.\n // If result starts with
`). This is needed only for full CommonMark compatibility. In real\n * world you will need HTML output.\n * - __breaks__ - `false`. Set `true` to convert `\\n` in paragraphs into `<br>
`.\n * - __langPrefix__ - `language-`. CSS language class prefix for fenced blocks.\n * Can be useful for external highlighters.\n * - __linkify__ - `false`. Set `true` to autoconvert URL-like text to links.\n * - __typographer__ - `false`. Set `true` to enable [some language-neutral\n * replacement](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/replacements.js) +\n * quotes beautification (smartquotes).\n * - __quotes__ - `“”‘’`, String or Array. Double + single quotes replacement\n * pairs, when typographer enabled and smartquotes on. For example, you can\n * use `'«»„“'` for Russian, `'„“‚‘'` for German, and\n * `['«\\xA0', '\\xA0»', '‹\\xA0', '\\xA0›']` for French (including nbsp).\n * - __highlight__ - `null`. Highlighter function for fenced code blocks.\n * Highlighter `function (str, lang)` should return escaped HTML. It can also\n * return empty string if the source was not changed and should be escaped\n * externaly. If result starts with
';\n * } catch (__) {}\n * }\n *\n * return '' +\n * hljs.highlight(str, { language: lang, ignoreIllegals: true }).value +\n * '
';\n * }\n * });\n * ```\n *\n **/\nfunction MarkdownIt(presetName, options) {\n if (!(this instanceof MarkdownIt)) {\n return new MarkdownIt(presetName, options);\n }\n\n if (!options) {\n if (!utils.isString(presetName)) {\n options = presetName || {};\n presetName = 'default';\n }\n }\n\n /**\n * MarkdownIt#inline -> ParserInline\n *\n * Instance of [[ParserInline]]. You may need it to add new rules when\n * writing plugins. For simple rules control use [[MarkdownIt.disable]] and\n * [[MarkdownIt.enable]].\n **/\n this.inline = new ParserInline();\n\n /**\n * MarkdownIt#block -> ParserBlock\n *\n * Instance of [[ParserBlock]]. You may need it to add new rules when\n * writing plugins. For simple rules control use [[MarkdownIt.disable]] and\n * [[MarkdownIt.enable]].\n **/\n this.block = new ParserBlock();\n\n /**\n * MarkdownIt#core -> Core\n *\n * Instance of [[Core]] chain executor. You may need it to add new rules when\n * writing plugins. For simple rules control use [[MarkdownIt.disable]] and\n * [[MarkdownIt.enable]].\n **/\n this.core = new ParserCore();\n\n /**\n * MarkdownIt#renderer -> Renderer\n *\n * Instance of [[Renderer]]. Use it to modify output look. Or to add rendering\n * rules for new token types, generated by plugins.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')();\n *\n * function myToken(tokens, idx, options, env, self) {\n * //...\n * return result;\n * };\n *\n * md.renderer.rules['my_token'] = myToken\n * ```\n *\n * See [[Renderer]] docs and [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js).\n **/\n this.renderer = new Renderer();\n\n /**\n * MarkdownIt#linkify -> LinkifyIt\n *\n * [linkify-it](https://github.com/markdown-it/linkify-it) instance.\n * Used by [linkify](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/linkify.js)\n * rule.\n **/\n this.linkify = new LinkifyIt();\n\n /**\n * MarkdownIt#validateLink(url) -> Boolean\n *\n * Link validation function. CommonMark allows too much in links. By default\n * we disable `javascript:`, `vbscript:`, `file:` schemas, and almost all `data:...` schemas\n * except some embedded image types.\n *\n * You can change this behaviour:\n *\n * ```javascript\n * var md = require('markdown-it')();\n * // enable everything\n * md.validateLink = function () { return true; }\n * ```\n **/\n this.validateLink = validateLink;\n\n /**\n * MarkdownIt#normalizeLink(url) -> String\n *\n * Function used to encode link url to a machine-readable format,\n * which includes url-encoding, punycode, etc.\n **/\n this.normalizeLink = normalizeLink;\n\n /**\n * MarkdownIt#normalizeLinkText(url) -> String\n *\n * Function used to decode link url to a human-readable format`\n **/\n this.normalizeLinkText = normalizeLinkText;\n\n\n // Expose utils & helpers for easy acces from plugins\n\n /**\n * MarkdownIt#utils -> utils\n *\n * Assorted utility functions, useful to write plugins. See details\n * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/common/utils.js).\n **/\n this.utils = utils;\n\n /**\n * MarkdownIt#helpers -> helpers\n *\n * Link components parser functions, useful to write plugins. 
See details\n * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/helpers).\n **/\n this.helpers = utils.assign({}, helpers);\n\n\n this.options = {};\n this.configure(presetName);\n\n if (options) { this.set(options); }\n}\n\n\n/** chainable\n * MarkdownIt.set(options)\n *\n * Set parser options (in the same format as in constructor). Probably, you\n * will never need it, but you can change options after constructor call.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')()\n * .set({ html: true, breaks: true })\n * .set({ typographer, true });\n * ```\n *\n * __Note:__ To achieve the best possible performance, don't modify a\n * `markdown-it` instance options on the fly. If you need multiple configurations\n * it's best to create multiple instances and initialize each with separate\n * config.\n **/\nMarkdownIt.prototype.set = function (options) {\n utils.assign(this.options, options);\n return this;\n};\n\n\n/** chainable, internal\n * MarkdownIt.configure(presets)\n *\n * Batch load of all options and compenent settings. This is internal method,\n * and you probably will not need it. But if you will - see available presets\n * and data structure [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)\n *\n * We strongly recommend to use presets instead of direct config loads. That\n * will give better compatibility with next versions.\n **/\nMarkdownIt.prototype.configure = function (presets) {\n var self = this, presetName;\n\n if (utils.isString(presets)) {\n presetName = presets;\n presets = config[presetName];\n if (!presets) { throw new Error('Wrong `markdown-it` preset \"' + presetName + '\", check name'); }\n }\n\n if (!presets) { throw new Error('Wrong `markdown-it` preset, can\\'t be empty'); }\n\n if (presets.options) { self.set(presets.options); }\n\n if (presets.components) {\n Object.keys(presets.components).forEach(function (name) {\n if (presets.components[name].rules) {\n self[name].ruler.enableOnly(presets.components[name].rules);\n }\n if (presets.components[name].rules2) {\n self[name].ruler2.enableOnly(presets.components[name].rules2);\n }\n });\n }\n return this;\n};\n\n\n/** chainable\n * MarkdownIt.enable(list, ignoreInvalid)\n * - list (String|Array): rule name or list of rule names to enable\n * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.\n *\n * Enable list or rules. It will automatically find appropriate components,\n * containing rules with given names. If rule not found, and `ignoreInvalid`\n * not set - throws exception.\n *\n * ##### Example\n *\n * ```javascript\n * var md = require('markdown-it')()\n * .enable(['sub', 'sup'])\n * .disable('smartquotes');\n * ```\n **/\nMarkdownIt.prototype.enable = function (list, ignoreInvalid) {\n var result = [];\n\n if (!Array.isArray(list)) { list = [ list ]; }\n\n [ 'core', 'block', 'inline' ].forEach(function (chain) {\n result = result.concat(this[chain].ruler.enable(list, true));\n }, this);\n\n result = result.concat(this.inline.ruler2.enable(list, true));\n\n var missed = list.filter(function (name) { return result.indexOf(name) < 0; });\n\n if (missed.length && !ignoreInvalid) {\n throw new Error('MarkdownIt. 
Failed to enable unknown rule(s): ' + missed);\n }\n\n return this;\n};\n\n\n/** chainable\n * MarkdownIt.disable(list, ignoreInvalid)\n * - list (String|Array): rule name or list of rule names to disable.\n * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.\n *\n * The same as [[MarkdownIt.enable]], but turn specified rules off.\n **/\nMarkdownIt.prototype.disable = function (list, ignoreInvalid) {\n var result = [];\n\n if (!Array.isArray(list)) { list = [ list ]; }\n\n [ 'core', 'block', 'inline' ].forEach(function (chain) {\n result = result.concat(this[chain].ruler.disable(list, true));\n }, this);\n\n result = result.concat(this.inline.ruler2.disable(list, true));\n\n var missed = list.filter(function (name) { return result.indexOf(name) < 0; });\n\n if (missed.length && !ignoreInvalid) {\n throw new Error('MarkdownIt. Failed to disable unknown rule(s): ' + missed);\n }\n return this;\n};\n\n\n/** chainable\n * MarkdownIt.use(plugin, params)\n *\n * Load specified plugin with given params into current parser instance.\n * It's just a sugar to call `plugin(md, params)` with curring.\n *\n * ##### Example\n *\n * ```javascript\n * var iterator = require('markdown-it-for-inline');\n * var md = require('markdown-it')()\n * .use(iterator, 'foo_replace', 'text', function (tokens, idx) {\n * tokens[idx].content = tokens[idx].content.replace(/foo/g, 'bar');\n * });\n * ```\n **/\nMarkdownIt.prototype.use = function (plugin /*, params, ... */) {\n var args = [ this ].concat(Array.prototype.slice.call(arguments, 1));\n plugin.apply(plugin, args);\n return this;\n};\n\n\n/** internal\n * MarkdownIt.parse(src, env) -> Array\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * Parse input string and return list of block tokens (special token type\n * \"inline\" will contain list of inline tokens). You should not call this\n * method directly, until you write custom renderer (for example, to produce\n * AST).\n *\n * `env` is used to pass data between \"distributed\" rules and return additional\n * metadata like reference info, needed for the renderer. It also can be used to\n * inject data in specific cases. Usually, you will be ok to pass `{}`,\n * and then pass updated object to renderer.\n **/\nMarkdownIt.prototype.parse = function (src, env) {\n if (typeof src !== 'string') {\n throw new Error('Input data should be a String');\n }\n\n var state = new this.core.State(src, this, env);\n\n this.core.process(state);\n\n return state.tokens;\n};\n\n\n/**\n * MarkdownIt.render(src [, env]) -> String\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * Render markdown string into html. It does all magic for you :).\n *\n * `env` can be used to inject additional metadata (`{}` by default).\n * But you will not need it with high probability. See also comment\n * in [[MarkdownIt.parse]].\n **/\nMarkdownIt.prototype.render = function (src, env) {\n env = env || {};\n\n return this.renderer.render(this.parse(src, env), this.options, env);\n};\n\n\n/** internal\n * MarkdownIt.parseInline(src, env) -> Array\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * The same as [[MarkdownIt.parse]] but skip all block rules. It returns the\n * block tokens list with the single `inline` element, containing parsed inline\n * tokens in `children` property. 
Also updates `env` object.\n **/\nMarkdownIt.prototype.parseInline = function (src, env) {\n var state = new this.core.State(src, this, env);\n\n state.inlineMode = true;\n this.core.process(state);\n\n return state.tokens;\n};\n\n\n/**\n * MarkdownIt.renderInline(src [, env]) -> String\n * - src (String): source string\n * - env (Object): environment sandbox\n *\n * Similar to [[MarkdownIt.render]] but for single paragraph content. Result\n * will NOT be wrapped into `' + md.utils.escapeHtml(str) + '
${r instanceof Error&&r.stack?r.stack.replaceAll(`
+`,"
"):String(r)}