Update to latest clippy and compiler #470

Merged: 2 commits, Sep 30, 2024
Changes from all commits
1 change: 1 addition & 0 deletions cfgrammar/src/lib/yacc/ast.rs
@@ -208,6 +208,7 @@ impl GrammarAST {
/// 3) Every token reference references a declared token
/// 4) If a production has a precedence token, then it references a declared token
/// 5) Every token declared with %epp matches a known token
+ ///
/// If the validation succeeds, None is returned.
pub(crate) fn complete_and_validate(&mut self) -> Result<(), YaccGrammarError> {
match self.start {
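Note: the only change in this hunk is the added empty `///` line, which newer clippy's doc lints (most likely `doc_lazy_continuation`) ask for so that the sentence after the numbered list is not parsed as a lazy continuation of item 5. A minimal sketch of the before/after shape, using placeholder doc text:

```rust
// Before: without a blank `///`, the final sentence is treated as part of item 2).
/// 1) every rule is reachable
/// 2) every token is declared
/// If the validation succeeds, None is returned.
fn before() {}

// After: the empty `///` line closes the list, so the sentence starts a new paragraph.
/// 1) every rule is reachable
/// 2) every token is declared
///
/// If the validation succeeds, None is returned.
fn after() {}
```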
22 changes: 11 additions & 11 deletions cfgrammar/src/lib/yacc/grammar.rs
@@ -711,7 +711,7 @@
.rule_max_costs
.borrow_mut()
.get_or_insert_with(|| rule_max_costs(self.grm, &self.token_costs))[usize::from(ridx)];
- if v == u16::max_value() {
+ if v == u16::MAX {
None
} else {
Some(v)
@@ -964,7 +964,7 @@
for ridx in grm.iter_rules() {
// Calling has_path so frequently is not exactly efficient...
if grm.has_path(ridx, ridx) {
- costs[usize::from(ridx)] = u16::max_value();
+ costs[usize::from(ridx)] = u16::MAX;
done[usize::from(ridx)] = true;
}
}
@@ -988,10 +988,10 @@
let sc = match *sym {
Symbol::Token(s_tidx) => u16::from(token_costs[usize::from(s_tidx)]),
Symbol::Rule(s_ridx) => {
- if costs[usize::from(s_ridx)] == u16::max_value() {
+ if costs[usize::from(s_ridx)] == u16::MAX {
// As soon as we find reference to an infinite rule, we
// can stop looking.
- hs_cmplt = Some(u16::max_value());
+ hs_cmplt = Some(u16::MAX);
break 'a;
}
if !done[usize::from(s_ridx)] {
@@ -1003,7 +1003,7 @@
c = c
.checked_add(sc)
.expect("Overflow occurred when calculating rule costs");
- if c == u16::max_value() {
+ if c == u16::MAX {
panic!("Unable to represent cost in 64 bits.");
}
}
@@ -1418,11 +1418,11 @@ mod test {
).unwrap();

let scores = rule_max_costs(&grm, &[1, 1, 1]);
assert_eq!(scores[usize::from(grm.rule_idx("A").unwrap())], u16::max_value());
assert_eq!(scores[usize::from(grm.rule_idx("B").unwrap())], u16::max_value());
assert_eq!(scores[usize::from(grm.rule_idx("C").unwrap())], u16::max_value());
assert_eq!(scores[usize::from(grm.rule_idx("D").unwrap())], u16::max_value());
assert_eq!(scores[usize::from(grm.rule_idx("E").unwrap())], u16::max_value());
assert_eq!(scores[usize::from(grm.rule_idx("A").unwrap())], u16::MAX);
assert_eq!(scores[usize::from(grm.rule_idx("B").unwrap())], u16::MAX);
assert_eq!(scores[usize::from(grm.rule_idx("C").unwrap())], u16::MAX);
assert_eq!(scores[usize::from(grm.rule_idx("D").unwrap())], u16::MAX);
assert_eq!(scores[usize::from(grm.rule_idx("E").unwrap())], u16::MAX);
}

#[test]
@@ -1441,7 +1441,7 @@
).unwrap();

let scores = rule_max_costs(&grm, &[1, 1, 1]);
- assert_eq!(scores[usize::from(grm.rule_idx("A").unwrap())], u16::max_value());
+ assert_eq!(scores[usize::from(grm.rule_idx("A").unwrap())], u16::MAX);
assert_eq!(scores[usize::from(grm.rule_idx("B").unwrap())], 3);
assert_eq!(scores[usize::from(grm.rule_idx("C").unwrap())], 2);
assert_eq!(scores[usize::from(grm.rule_idx("D").unwrap())], 3);
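Note: every grammar.rs hunk is the same mechanical rewrite: the `max_value()` method is replaced by the associated constant `u16::MAX`, which has been stable since Rust 1.43 and is what current clippy nudges towards (the lint name, if I recall correctly, is `legacy_numeric_constants`). A one-line demonstration that the two spellings are equivalent:

```rust
fn main() {
    // Same value, different spelling; the attribute just silences any deprecation
    // warning the method form may carry on newer toolchains.
    #[allow(deprecated)]
    let old = u16::max_value();
    assert_eq!(old, u16::MAX);
    // The constant form exists for every integer type, including usize.
    assert_eq!(usize::MAX, !0usize);
}
```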
1 change: 1 addition & 0 deletions lrlex/src/lib/lexer.rs
@@ -159,6 +159,7 @@ where
/// track of which lexemes:
/// 1) are defined in the lexer but not referenced by the parser
/// 2) and referenced by the parser but not defined in the lexer
+ ///
/// and returns them as a tuple `(Option<HashSet<&str>>, Option<HashSet<&str>>)` in the order
/// (*defined_in_lexer_missing_from_parser*, *referenced_in_parser_missing_from_lexer*). Since
/// in most cases both sets are expected to be empty, `None` is returned to avoid a `HashSet`
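For context, the doc comment above describes a method returning `(Option<HashSet<&str>>, Option<HashSet<&str>>)` in the order (defined-in-lexer-but-unreferenced, referenced-by-parser-but-undefined). A minimal sketch, not the lrlex API, of how a caller might surface both sets as warnings:

```rust
use std::collections::HashSet;

// Hypothetical helper: takes the tuple in the documented order and prints a
// warning for each non-empty set. `None` means "nothing to report".
fn warn_on_mismatches(result: (Option<HashSet<&str>>, Option<HashSet<&str>>)) {
    let (defined_in_lexer_missing_from_parser, referenced_in_parser_missing_from_lexer) = result;
    if let Some(names) = defined_in_lexer_missing_from_parser {
        eprintln!("warning: lexemes defined in the lexer but never referenced by the parser: {names:?}");
    }
    if let Some(names) = referenced_in_parser_missing_from_lexer {
        eprintln!("warning: lexemes referenced by the parser but not defined in the lexer: {names:?}");
    }
}
```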
18 changes: 0 additions & 18 deletions lrlex/src/lib/parser.rs
@@ -645,12 +645,7 @@ mod test {
}};
}

- fn line_of_offset(s: &str, off: usize) -> usize {
- s[..off].lines().count()
- }
-
trait ErrorsHelper {
- fn expect_error_at_line(self, src: &str, kind: LexErrorKind, line: usize);
fn expect_error_at_line_col(self, src: &str, kind: LexErrorKind, line: usize, col: usize);
fn expect_error_at_lines_cols(
self,
@@ -666,19 +661,6 @@
}

impl ErrorsHelper for Result<LRNonStreamingLexerDef<DefaultLexerTypes<u8>>, Vec<LexBuildError>> {
- #[track_caller]
- fn expect_error_at_line(self, src: &str, kind: LexErrorKind, line: usize) {
- let errs = self
- .as_ref()
- .map_err(Vec::as_slice)
- .expect_err("Parsed ok while expecting error");
- assert_eq!(errs.len(), 1);
- let e = &errs[0];
- assert_eq!(e.kind, kind);
- assert_eq!(line_of_offset(src, e.spans()[0].start()), line);
- assert_eq!(e.spans.len(), 1);
- }
-
#[track_caller]
fn expect_error_at_line_col(self, src: &str, kind: LexErrorKind, line: usize, col: usize) {
self.expect_error_at_lines_cols(src, kind, &mut std::iter::once((line, col)))
2 changes: 1 addition & 1 deletion lrpar/cttests_macro/Cargo.toml
@@ -4,7 +4,7 @@ version = "0.1.0"
edition = "2021"

[lib]
- proc_macro = true
+ proc-macro = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
1 change: 1 addition & 0 deletions lrpar/src/lib/ctbuilder.rs
@@ -182,6 +182,7 @@ where
/// * big enough to index (separately) all the tokens, rules, productions in the grammar,
/// * big enough to index the state table created from the grammar,
/// * less than or equal in size to `u32`.
+ ///
/// In other words, if you have a grammar with 256 tokens, 256 rules, and 256 productions,
/// which creates a state table of 256 states you can safely specify `u8` here; but if any of
/// those counts becomes 257 or greater you will need to specify `u16`. If you are parsing
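The doc comment above is about choosing a storage type big enough to index every token, rule, production, and state. The arithmetic behind the "256 is fine for `u8`, 257 is not" rule of thumb, as a standalone illustration (not lrpar API):

```rust
fn main() {
    // A u8 can hold indices 0..=255, i.e. 256 distinct tokens/rules/productions/states.
    assert_eq!(usize::from(u8::MAX) + 1, 256);
    // One more than that forces the next size up, which covers 65_536 indices.
    assert_eq!(usize::from(u16::MAX) + 1, 65_536);
    // The upper bound mentioned in the docs: the type must be no bigger than u32.
    assert_eq!(u32::MAX as u64 + 1, 4_294_967_296);
}
```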
2 changes: 1 addition & 1 deletion lrpar/src/lib/dijkstra.rs
@@ -8,7 +8,7 @@ use indexmap::{
/// Starting at `start_node`, return, in arbitrary order, all least-cost success nodes.
///
/// * `neighbours` takes a node `n` and returns an iterator consisting of all `n`'s neighbouring
- /// nodes.
+ ///   nodes.
/// * `success` takes a node `n` and returns `true` if it is a success node or `false` otherwise.
///
/// The name of this function isn't entirely accurate: this isn't Dijkstra's original algorithm or
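The contract described in the doc comment above (a `neighbours` closure, a `success` predicate, return every least-cost success node) can be sketched with a standard binary-heap search. This is an illustrative reimplementation under the assumption that each neighbour comes with a non-negative edge cost; it is not the lrpar code:

```rust
use std::cmp::Reverse;
use std::collections::{BinaryHeap, HashMap};
use std::hash::Hash;

// Sketch only: `neighbours` is assumed to yield `(edge_cost, node)` pairs;
// `success` marks goal nodes. Returns all goal nodes whose total cost equals
// the minimum over all reachable goal nodes.
fn least_cost_successes<N, FN, FS, I>(start: N, mut neighbours: FN, mut success: FS) -> Vec<N>
where
    N: Clone + Eq + Hash + Ord,
    FN: FnMut(&N) -> I,
    FS: FnMut(&N) -> bool,
    I: IntoIterator<Item = (u64, N)>,
{
    let mut best: HashMap<N, u64> = HashMap::new();
    let mut todo = BinaryHeap::new();
    let mut found = Vec::new();
    let mut found_cost: Option<u64> = None;

    best.insert(start.clone(), 0);
    todo.push(Reverse((0u64, start)));

    while let Some(Reverse((cost, node))) = todo.pop() {
        // Anything dearer than an already-found success node can be ignored.
        if found_cost.is_some_and(|fc| cost > fc) {
            break;
        }
        // Skip stale queue entries for nodes reached more cheaply since being queued.
        if best.get(&node).is_some_and(|&b| cost > b) {
            continue;
        }
        if success(&node) {
            found_cost = Some(cost);
            found.push(node);
            continue;
        }
        for (edge_cost, nbr) in neighbours(&node) {
            let nc = cost + edge_cost;
            if best.get(&nbr).map_or(true, |&b| nc < b) {
                best.insert(nbr.clone(), nc);
                todo.push(Reverse((nc, nbr)));
            }
        }
    }
    found
}
```

The real function in dijkstra.rs may supply costs and neighbours differently, so treat this purely as a reading aid for the documented contract.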
2 changes: 1 addition & 1 deletion lrtable/src/lib/pager.rs
@@ -46,7 +46,7 @@ impl<StorageT: Hash + PrimInt + Unsigned> Itemset<StorageT> {

// Check that each itemset has the same core configuration.
for &(pidx, dot) in self.items.keys() {
- if other.items.get(&(pidx, dot)).is_none() {
+ if !other.items.contains_key(&(pidx, dot)) {
return false;
}
}
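The pager.rs change is clippy's preference for a direct key-presence test over building an `Option` only to discard it; the two expressions are equivalent:

```rust
use std::collections::HashMap;

fn main() {
    let mut items: HashMap<(u32, u32), &str> = HashMap::new();
    items.insert((1, 0), "item");
    let key = (2, 3);
    // Old style: look the value up, then only ask whether the lookup failed.
    let old = items.get(&key).is_none();
    // New style: ask the question directly.
    let new = !items.contains_key(&key);
    assert_eq!(old, new);
}
```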
4 changes: 2 additions & 2 deletions lrtable/src/lib/statetable.rs
@@ -180,8 +180,8 @@ where
let maxa = usize::from(grm.tokens_len()) * usize::from(sg.all_states_len());
let maxg = usize::from(grm.rules_len()) * usize::from(sg.all_states_len());
// We only have usize-2 bits to store state IDs and rule indexes
- assert!(usize::from(sg.all_states_len()) < (usize::max_value() - 4));
- assert!(usize::from(grm.rules_len()) < (usize::max_value() - 4));
+ assert!(usize::from(sg.all_states_len()) < (usize::MAX - 4));
+ assert!(usize::from(grm.rules_len()) < (usize::MAX - 4));
let mut actions: Vec<usize> = vec![0; maxa];

// Since 0 is reserved for the error type, and states are encoded by adding 1, we can only