Merge branch 'master' into add_absolute_links_support

joaofreires 2022-11-09 11:47:50 -03:00
commit bcd63f97c5
14 changed files with 153 additions and 108 deletions


@@ -116,7 +116,7 @@ If possible, do your best to avoid breaking older browser releases.
Any change to the HTML or styling is encouraged to manually check on as many browsers and platforms that you can.
Unfortunately at this time we don't have any automated UI or browser testing, so your assistance in testing is appreciated.
## Updating higlight.js
## Updating highlight.js
The following are instructions for updating [highlight.js](https://highlightjs.org/).

Cargo.lock generated

@@ -809,10 +809,10 @@ dependencies = [
"futures-util",
"gitignore",
"handlebars",
"lazy_static",
"log",
"memchr",
"notify",
"once_cell",
"opener",
"predicates",
"pretty_assertions",
@@ -997,6 +997,12 @@ dependencies = [
"libc",
]
[[package]]
name = "once_cell"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "opaque-debug"
version = "0.2.3"


@@ -20,9 +20,9 @@ anyhow = "1.0.28"
chrono = "0.4"
clap = { version = "3.0", features = ["cargo"] }
clap_complete = "3.0"
once_cell = "1"
env_logger = "0.9.0"
handlebars = "4.0"
lazy_static = "1.0"
log = "0.4"
memchr = "2.0"
opener = "0.5"
@@ -65,3 +65,7 @@ search = ["elasticlunr-rs", "ammonia"]
[[bin]]
doc = false
name = "mdbook"
[[example]]
name = "nop-preprocessor"
test = true


@@ -101,4 +101,58 @@ mod nop_lib {
renderer != "not-supported"
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn nop_preprocessor_run() {
let input_json = r##"[
{
"root": "/path/to/book",
"config": {
"book": {
"authors": ["AUTHOR"],
"language": "en",
"multilingual": false,
"src": "src",
"title": "TITLE"
},
"preprocessor": {
"nop": {}
}
},
"renderer": "html",
"mdbook_version": "0.4.21"
},
{
"sections": [
{
"Chapter": {
"name": "Chapter 1",
"content": "# Chapter 1\n",
"number": [1],
"sub_items": [],
"path": "chapter_1.md",
"source_path": "chapter_1.md",
"parent_names": []
}
}
],
"__non_exhaustive": null
}
]"##;
let input_json = input_json.as_bytes();
let (ctx, book) = mdbook::preprocess::CmdPreprocessor::parse_input(input_json).unwrap();
let expected_book = book.clone();
let result = Nop::new().run(&ctx, book);
assert!(result.is_ok());
// The nop-preprocessor should not have made any changes to the book content.
let actual_book = result.unwrap();
assert_eq!(actual_book, expected_book);
}
}
}
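The new `[[example]]` entry in Cargo.toml (`test = true`) is what lets `cargo test` build the nop-preprocessor example and run the `#[cfg(test)]` module above. For context, here is a minimal sketch of the stdin/stdout side of the preprocessor protocol that `CmdPreprocessor::parse_input` serves; the `handle_preprocessing` name and exact error handling are illustrative, not the example's verbatim code:

```rust
use std::io;

use mdbook::errors::Error;
use mdbook::preprocess::{CmdPreprocessor, Preprocessor};

// Read the (context, book) JSON pair that mdbook writes to the preprocessor's
// stdin, run the preprocessor, and emit the processed book as JSON on stdout.
fn handle_preprocessing(pre: &dyn Preprocessor) -> Result<(), Error> {
    let (ctx, book) = CmdPreprocessor::parse_input(io::stdin())?;
    let processed = pre.run(&ctx, book)?;
    serde_json::to_writer(io::stdout(), &processed)?;
    Ok(())
}
```

The test above skips this I/O layer by handing `parse_input` the JSON bytes directly, which is why no child process is involved.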


@@ -454,7 +454,7 @@ impl<'a> SummaryParser<'a> {
items.push(item);
}
Some(Event::Start(Tag::List(..))) => {
// Skip this tag after comment bacause it is not nested.
// Skip this tag after comment because it is not nested.
if items.is_empty() {
continue;
}


@@ -89,8 +89,7 @@ pub fn execute(args: &ArgMatches) -> Result<()> {
let input_404 = book
.config
.get("output.html.input-404")
.map(toml::Value::as_str)
.and_then(std::convert::identity) // flatten
.and_then(toml::Value::as_str)
.map(ToString::to_string);
let file_404 = get_404_output_file(&input_404);
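The hunk above replaces a map-then-flatten chain on the `output.html.input-404` value with a single `and_then`. A small sketch of why the two spellings are equivalent on `Option`; `as_str` here is a hypothetical stand-in for `toml::Value::as_str`:

```rust
// Hypothetical Option-returning accessor standing in for toml::Value::as_str.
fn as_str(v: &u32) -> Option<&'static str> {
    if *v == 404 { Some("404.md") } else { None }
}

fn main() {
    let value = 404u32;
    let opt = Some(&value);

    // Old: map yields Option<Option<&str>>, and_then(identity) flattens it.
    let old = opt.map(as_str).and_then(std::convert::identity);
    // New: and_then maps and flattens in one step.
    let new = opt.and_then(as_str);

    assert_eq!(old, new);
    assert_eq!(new, Some("404.md"));
}
```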


@@ -4,8 +4,8 @@ use std::path::Path;
use super::{Preprocessor, PreprocessorContext};
use crate::book::{Book, BookItem};
use crate::errors::*;
use lazy_static::lazy_static;
use log::warn;
use once_cell::sync::Lazy;
/// A preprocessor for converting file name `README.md` to `index.md` since
/// `README.md` is the de facto index file in markdown-based documentation.
@@ -68,9 +68,8 @@ fn warn_readme_name_conflict<P: AsRef<Path>>(readme_path: P, index_path: P) {
}
fn is_readme_file<P: AsRef<Path>>(path: P) -> bool {
lazy_static! {
static ref RE: Regex = Regex::new(r"(?i)^readme$").unwrap();
}
static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)^readme$").unwrap());
RE.is_match(
path.as_ref()
.file_stem()
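This is the first of many hunks in the commit that swap `lazy_static!` for `once_cell::sync::Lazy`. A self-contained sketch of the pattern, mirroring the regex above (the helper name is illustrative; the real `is_readme_file` takes `P: AsRef<Path>`):

```rust
use once_cell::sync::Lazy;
use regex::Regex;

// Before: lazy_static! { static ref RE: Regex = Regex::new(r"(?i)^readme$").unwrap(); }
// After: a plain static whose initializer closure runs once, on first access.
static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)^readme$").unwrap());

fn is_readme_stem(stem: &str) -> bool {
    RE.is_match(stem)
}

fn main() {
    assert!(is_readme_stem("README"));
    assert!(is_readme_stem("readme"));
    assert!(!is_readme_stem("SUMMARY"));
}
```

The `Lazy` form needs no macro or hidden wrapper type, and the static can stay scoped inside the function that uses it, which is what every converted call site below does.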


@@ -10,8 +10,8 @@ use std::path::{Path, PathBuf};
use super::{Preprocessor, PreprocessorContext};
use crate::book::{Book, BookItem};
use lazy_static::lazy_static;
use log::{error, warn};
use once_cell::sync::Lazy;
const ESCAPE_CHAR: char = '\\';
const MAX_LINK_NESTED_DEPTH: usize = 10;
@@ -410,8 +410,8 @@ impl<'a> Iterator for LinkIter<'a> {
fn find_links(contents: &str) -> LinkIter<'_> {
// lazily compute following regex
// r"\\\{\{#.*\}\}|\{\{#([a-zA-Z0-9]+)\s*([^}]+)\}\}")?;
lazy_static! {
static ref RE: Regex = Regex::new(
static RE: Lazy<Regex> = Lazy::new(|| {
Regex::new(
r"(?x) # insignificant whitespace mode
\\\{\{\#.*\}\} # match escaped link
| # or
@@ -419,10 +419,11 @@ fn find_links(contents: &str) -> LinkIter<'_> {
\#([a-zA-Z0-9_]+) # link type
\s+ # separating whitespace
([^}]+) # link target path and space separated properties
\}\} # link closing parens"
\}\} # link closing parens",
)
.unwrap();
}
.unwrap()
});
LinkIter(RE.captures_iter(contents))
}
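For reference, a sketch of what the rebuilt `find_links` regex accepts. It assumes the two hunks above join into the full verbose-mode pattern; the `\{\{\s*` line between them sits outside the displayed context:

```rust
use once_cell::sync::Lazy;
use regex::Regex;

static RE: Lazy<Regex> = Lazy::new(|| {
    Regex::new(
        r"(?x)               # insignificant whitespace mode
          \\\{\{\#.*\}\}     # match escaped link
          |                  # or
          \{\{\s*            # link opening parens and whitespace
          \#([a-zA-Z0-9_]+)  # link type
          \s+                # separating whitespace
          ([^}]+)            # link target path and space separated properties
          \}\}               # link closing parens",
    )
    .unwrap()
});

fn main() {
    // A normal directive captures the link type and its arguments.
    let caps = RE.captures("{{#include file.rs:10:20}}").unwrap();
    assert_eq!(&caps[1], "include");
    assert_eq!(&caps[2], "file.rs:10:20");

    // An escaped directive matches the first alternative, so no groups are set.
    let escaped = RE.captures(r"\{{#include file.rs}}").unwrap();
    assert!(escaped.get(1).is_none());
}
```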


@@ -14,8 +14,8 @@ use std::path::{Path, PathBuf};
use crate::utils::fs::get_404_output_file;
use handlebars::Handlebars;
use lazy_static::lazy_static;
use log::{debug, trace, warn};
use once_cell::sync::Lazy;
use regex::{Captures, Regex};
use serde_json::json;
@@ -780,9 +780,8 @@ fn make_data(
/// Goes through the rendered HTML, making sure all header tags have
/// an anchor respectively so people can link to sections directly.
fn build_header_links(html: &str) -> String {
lazy_static! {
static ref BUILD_HEADER_LINKS: Regex = Regex::new(r"<h(\d)>(.*?)</h\d>").unwrap();
}
static BUILD_HEADER_LINKS: Lazy<Regex> =
Lazy::new(|| Regex::new(r"<h(\d)>(.*?)</h\d>").unwrap());
let mut id_counter = HashMap::new();
@@ -823,10 +822,8 @@ fn insert_link_into_header(
// ```
// This function replaces all commas by spaces in the code block classes
fn fix_code_blocks(html: &str) -> String {
lazy_static! {
static ref FIX_CODE_BLOCKS: Regex =
Regex::new(r##"<code([^>]+)class="([^"]+)"([^>]*)>"##).unwrap();
}
static FIX_CODE_BLOCKS: Lazy<Regex> =
Lazy::new(|| Regex::new(r##"<code([^>]+)class="([^"]+)"([^>]*)>"##).unwrap());
FIX_CODE_BLOCKS
.replace_all(html, |caps: &Captures<'_>| {
@@ -849,10 +846,9 @@ fn add_playground_pre(
playground_config: &Playground,
edition: Option<RustEdition>,
) -> String {
lazy_static! {
static ref ADD_PLAYGROUND_PRE: Regex =
Regex::new(r##"((?s)<code[^>]?class="([^"]+)".*?>(.*?)</code>)"##).unwrap();
}
static ADD_PLAYGROUND_PRE: Lazy<Regex> =
Lazy::new(|| Regex::new(r##"((?s)<code[^>]?class="([^"]+)".*?>(.*?)</code>)"##).unwrap());
ADD_PLAYGROUND_PRE
.replace_all(html, |caps: &Captures<'_>| {
let text = &caps[1];
@@ -915,9 +911,7 @@ fn add_playground_pre(
}
fn hide_lines(content: &str) -> String {
lazy_static! {
static ref BORING_LINES_REGEX: Regex = Regex::new(r"^(\s*)#(.?)(.*)$").unwrap();
}
static BORING_LINES_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(\s*)#(.?)(.*)$").unwrap());
let mut result = String::with_capacity(content.len());
let mut lines = content.lines().peekable();
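The `BORING_LINES_REGEX` above backs mdBook's hidden-line handling in Rust code blocks (per the mdBook docs, `# ` hides a line and `##` escapes a literal `#`). A quick sketch of what its three capture groups hold:

```rust
use once_cell::sync::Lazy;
use regex::Regex;

static BORING_LINES_REGEX: Lazy<Regex> =
    Lazy::new(|| Regex::new(r"^(\s*)#(.?)(.*)$").unwrap());

fn main() {
    // Group 1: indentation, group 2: the single character after '#',
    // group 3: the rest of the line. A space in group 2 marks a hidden line.
    let caps = BORING_LINES_REGEX.captures("    # hidden_setup();").unwrap();
    assert_eq!(&caps[1], "    ");
    assert_eq!(&caps[2], " ");
    assert_eq!(&caps[3], "hidden_setup();");

    // "##" escapes a literal '#': group 2 is '#', so the line is kept.
    let caps = BORING_LINES_REGEX.captures("##[derive(Debug)]").unwrap();
    assert_eq!(&caps[2], "#");
    assert_eq!(&caps[3], "[derive(Debug)]");
}
```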


@@ -148,15 +148,12 @@ fn render(
trace!("Render template");
_h.template()
.ok_or_else(|| RenderError::new("Error with the handlebars template"))
.and_then(|t| {
let t = _h
.template()
.ok_or_else(|| RenderError::new("Error with the handlebars template"))?;
let local_ctx = Context::wraps(&context)?;
let mut local_rc = rc.clone();
t.render(r, &local_ctx, &mut local_rc, out)
})?;
Ok(())
}
pub fn previous(


@@ -117,13 +117,11 @@ impl HelperDef for RenderToc {
}
// Link
let path_exists = if let Some(path) =
item.get("path")
.and_then(|p| if p.is_empty() { None } else { Some(p) })
{
let path_exists: bool;
match item.get("path") {
Some(path) if !path.is_empty() => {
out.write("<a href=\"")?;
let tmp = Path::new(item.get("path").expect("Error: path should be Some(_)"))
let tmp = Path::new(path)
.with_extension("html")
.to_str()
.unwrap()
@@ -141,11 +139,13 @@
}
out.write(">")?;
true
} else {
path_exists = true;
}
_ => {
out.write("<div>")?;
false
};
path_exists = false;
}
}
if !self.no_section_label {
// Section does not necessarily exist
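Without add/remove markers the toc.rs hunk above is the hardest one to follow: it replaces an `if let` over a filtered `Option` with a `match` guard, and the commit assigns `path_exists` inside each arm rather than returning it from the expression. A rough before/after sketch of the control flow, using a plain map instead of the real handlebars `item` (the sketch returns the bool for brevity):

```rust
use std::collections::BTreeMap;

fn main() {
    let mut item: BTreeMap<&str, &str> = BTreeMap::new();
    item.insert("path", "chapter_1.md");

    // Before: filter empty paths into None, then branch on the Option.
    let before = if let Some(path) = item
        .get("path")
        .and_then(|p| if p.is_empty() { None } else { Some(p) })
    {
        let _ = path; // write "<a href=\"...\">" here
        true
    } else {
        false // write "<div>" here
    };

    // After: one match with a guard; the empty-path case falls through to `_`.
    let after = match item.get("path") {
        Some(path) if !path.is_empty() => {
            let _ = path; // write "<a href=\"...\">" here
            true
        }
        _ => false, // write "<div>" here
    };

    assert_eq!(before, after);
}
```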


@@ -3,6 +3,7 @@ use std::collections::{HashMap, HashSet};
use std::path::Path;
use elasticlunr::{Index, IndexBuilder};
use once_cell::sync::Lazy;
use pulldown_cmark::*;
use crate::book::{Book, BookItem};
@@ -10,7 +11,6 @@ use crate::config::Search;
use crate::errors::*;
use crate::theme::searcher;
use crate::utils;
use lazy_static::lazy_static;
use log::{debug, warn};
use serde::Serialize;
@@ -267,8 +267,7 @@ fn write_to_json(index: Index, search_config: &Search, doc_urls: Vec<String>) ->
}
fn clean_html(html: &str) -> String {
lazy_static! {
static ref AMMONIA: ammonia::Builder<'static> = {
static AMMONIA: Lazy<ammonia::Builder<'static>> = Lazy::new(|| {
let mut clean_content = HashSet::new();
clean_content.insert("script");
clean_content.insert("style");
@@ -281,7 +280,6 @@ fn clean_html(html: &str) -> String {
.allowed_classes(HashMap::new())
.clean_content_tags(clean_content);
builder
};
}
});
AMMONIA.clean(html).to_string()
}


@@ -4,8 +4,8 @@ pub mod fs;
mod string;
pub(crate) mod toml_ext;
use crate::errors::Error;
use lazy_static::lazy_static;
use log::error;
use once_cell::sync::Lazy;
use pulldown_cmark::{html, CodeBlockKind, CowStr, Event, Options, Parser, Tag};
use regex::Regex;
@@ -21,9 +21,7 @@ pub use self::string::{
/// Replaces multiple consecutive whitespace characters with a single space character.
pub fn collapse_whitespace(text: &str) -> Cow<'_, str> {
lazy_static! {
static ref RE: Regex = Regex::new(r"\s\s+").unwrap();
}
static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"\s\s+").unwrap());
RE.replace_all(text, " ")
}
@@ -52,9 +50,7 @@ pub fn id_from_content(content: &str) -> String {
let mut content = content.to_string();
// Skip any tags or html-encoded stuff
lazy_static! {
static ref HTML: Regex = Regex::new(r"(<.*?>)").unwrap();
}
static HTML: Lazy<Regex> = Lazy::new(|| Regex::new(r"(<.*?>)").unwrap());
content = HTML.replace_all(&content, "").into();
const REPL_SUB: &[&str] = &["&lt;", "&gt;", "&amp;", "&#39;", "&quot;"];
for sub in REPL_SUB {
@@ -97,10 +93,9 @@ pub fn unique_id_from_content(content: &str, id_counter: &mut HashMap<String, us
/// None. Ideally, print page links would link to anchors on the print page,
/// but that is very difficult.
fn adjust_links<'a>(event: Event<'a>, path: Option<&Path>, abs_url: Option<&String>) -> Event<'a> {
lazy_static! {
static ref SCHEME_LINK: Regex = Regex::new(r"^[a-z][a-z0-9+.-]*:").unwrap();
static ref MD_LINK: Regex = Regex::new(r"(?P<link>.*)\.md(?P<anchor>#.*)?").unwrap();
}
static SCHEME_LINK: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[a-z][a-z0-9+.-]*:").unwrap());
static MD_LINK: Lazy<Regex> =
Lazy::new(|| Regex::new(r"(?P<link>.*)\.md(?P<anchor>#.*)?").unwrap());
fn fix<'a>(dest: CowStr<'a>, path: Option<&Path>, abs_url: Option<&String>) -> CowStr<'a> {
if dest.starts_with('#') {
@@ -160,10 +155,8 @@ fn adjust_links<'a>(event: Event<'a>, path: Option<&Path>, abs_url: Option<&Stri
// There are dozens of HTML tags/attributes that contain paths, so
// feel free to add more tags if desired; these are the only ones I
// care about right now.
lazy_static! {
static ref HTML_LINK: Regex =
Regex::new(r#"(<(?:a|img) [^>]*?(?:src|href)=")([^"]+?)""#).unwrap();
}
static HTML_LINK: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(<(?:a|img) [^>]*?(?:src|href)=")([^"]+?)""#).unwrap());
HTML_LINK
.replace_all(&html, |caps: &regex::Captures<'_>| {


@@ -1,4 +1,4 @@
use lazy_static::lazy_static;
use once_cell::sync::Lazy;
use regex::Regex;
use std::ops::Bound::{Excluded, Included, Unbounded};
use std::ops::RangeBounds;
@@ -24,10 +24,10 @@ pub fn take_lines<R: RangeBounds<usize>>(s: &str, range: R) -> String {
}
}
lazy_static! {
static ref ANCHOR_START: Regex = Regex::new(r"ANCHOR:\s*(?P<anchor_name>[\w_-]+)").unwrap();
static ref ANCHOR_END: Regex = Regex::new(r"ANCHOR_END:\s*(?P<anchor_name>[\w_-]+)").unwrap();
}
static ANCHOR_START: Lazy<Regex> =
Lazy::new(|| Regex::new(r"ANCHOR:\s*(?P<anchor_name>[\w_-]+)").unwrap());
static ANCHOR_END: Lazy<Regex> =
Lazy::new(|| Regex::new(r"ANCHOR_END:\s*(?P<anchor_name>[\w_-]+)").unwrap());
/// Take anchored lines from a string.
/// Lines containing anchor are ignored.
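The `ANCHOR_START`/`ANCHOR_END` regexes above find the markers used by anchor-based includes (`{{#include file.rs:anchor_name}}`). A short sketch of the marker lines they are meant to match in an included file:

```rust
use once_cell::sync::Lazy;
use regex::Regex;

static ANCHOR_START: Lazy<Regex> =
    Lazy::new(|| Regex::new(r"ANCHOR:\s*(?P<anchor_name>[\w_-]+)").unwrap());
static ANCHOR_END: Lazy<Regex> =
    Lazy::new(|| Regex::new(r"ANCHOR_END:\s*(?P<anchor_name>[\w_-]+)").unwrap());

fn main() {
    // Anchors usually sit in comments of the file being included.
    let caps = ANCHOR_START.captures("// ANCHOR: my_example").unwrap();
    assert_eq!(&caps["anchor_name"], "my_example");
    assert!(ANCHOR_END.is_match("// ANCHOR_END: my_example"));

    // Ordinary lines that merely mention the name are not treated as markers.
    assert!(!ANCHOR_START.is_match("let my_example = 1;"));
}
```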