Format and check

dlesbre committed Feb 16, 2024
1 parent 8f10b9a commit 12a8489
Showing 19 changed files with 57 additions and 150 deletions.
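The pattern in every hunk below is the same: calls wrapped across several lines are collapsed onto one, which is what a formatter produces when its line-length limit is raised. The in-diff comment in semantic_scholar.py confirms Black is in use; a plausible configuration sketch follows, where the exact line length and the check commands are assumptions, not part of this commit:

    # pyproject.toml -- hypothetical settings consistent with this diff
    [tool.black]
    # assumed value; the added lines exceed Black's default limit of 88
    line-length = 120

    # To apply and verify ("Format and check"), one might run:
    #   black .          # rewrite files in place
    #   black --check .  # exit non-zero if any file would be reformatted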
4 changes: 1 addition & 3 deletions bibtexautocomplete/APIs/dblp.py
@@ -73,9 +73,7 @@ def get_value(self, result: SafeJSON) -> BibtexEntry:
values.pages.set_str(info["pages"].to_str())
values.title.set(info["title"].to_str())
values.volume.set(info["volume"].to_str())
-values.url.set(
-    info["ee"].to_str() if info["access"].to_str() == "open" else None
-)
+values.url.set(info["ee"].to_str() if info["access"].to_str() == "open" else None)
values.year.set(info["year"].to_str())
return values

4 changes: 1 addition & 3 deletions bibtexautocomplete/APIs/doi.py
@@ -13,9 +13,7 @@
from ..utils.safe_json import SafeJSON


-class URLCheck(
-    ConditionMixin[str, Optional[Data]], RedirectFollower[str, Optional[Data]]
-):
+class URLCheck(ConditionMixin[str, Optional[Data]], RedirectFollower[str, Optional[Data]]):
"""Checks that an URL exists (should return 200)
Follows redirection (up to a certain depth)"""

4 changes: 1 addition & 3 deletions bibtexautocomplete/APIs/semantic_scholar.py
@@ -131,9 +131,7 @@ def get_value(self, result: SafeJSON) -> BibtexEntry:

# Black formatting is VERY ugly without the two variables
j1 = result["publicationVenue"]["type"].to_str() == "journal"
j2 = "JournalArticle" in [
x.to_str() for x in result["publicationTypes"].iter_list()
]
j2 = "JournalArticle" in [x.to_str() for x in result["publicationTypes"].iter_list()]
is_journal = j1 or j2

venue = result["venue"].to_str()
8 changes: 2 additions & 6 deletions bibtexautocomplete/bibtex/base_field.py
@@ -100,9 +100,7 @@ def combine(self, other: "BibtexField[T]") -> "BibtexField[T]":
(eg. fewer abbreviations). This will only be called on fields that match"""
if self.value is not None:
if other.value is not None:
-obj = self.__class__(
-    self.field, self.source + SOURCE_SEPARATOR + other.source
-)
+obj = self.__class__(self.field, self.source + SOURCE_SEPARATOR + other.source)
obj.value = self.combine_values(self.value, other.value)
return obj
logger.warn("Combining fields which store None")
@@ -267,9 +265,7 @@ def match_values_slow(cls, a: List[T], b: List[T]) -> int:
return cls.compute_score(a, b, common_scores, common)

@classmethod
-def compute_score(
-    cls, a: List[T], b: List[T], common_scores: int, common: int
-) -> int:
+def compute_score(cls, a: List[T], b: List[T], common_scores: int, common: int) -> int:
"""Compute the final score from the number of common elements
and the sum of the scores"""
if common == 0:
4 changes: 1 addition & 3 deletions bibtexautocomplete/bibtex/fields.py
@@ -168,9 +168,7 @@ def match_values(cls, a: Author, b: Author) -> int:
return FIELD_FULL_MATCH // 2
if normalize_str(a.firstnames) == normalize_str(b.firstnames):
return FIELD_FULL_MATCH
-if is_abbrev(a.firstnames, b.firstnames) or is_abbrev(
-    b.firstnames, a.firstnames
-):
+if is_abbrev(a.firstnames, b.firstnames) or is_abbrev(b.firstnames, a.firstnames):
return 3 * FIELD_FULL_MATCH // 4
return FIELD_NO_MATCH

10 changes: 2 additions & 8 deletions bibtexautocomplete/bibtex/normalize.py
@@ -44,11 +44,7 @@ def has_field(entry: EntryType, field: str) -> bool:
def strip_accents(string: str) -> str:
"""replace accented characters with their non-accented variants"""
# Solution from https://stackoverflow.com/a/518232
return "".join(
c
for c in unicodedata.normalize("NFD", string)
if unicodedata.category(c) != "Mn"
)
return "".join(c for c in unicodedata.normalize("NFD", string) if unicodedata.category(c) != "Mn")


def normalize_str_weak(string: str) -> str:
@@ -90,9 +86,7 @@ def normalize_doi(doi_or_url: Optional[str]) -> Optional[str]:
return None


-def normalize_url(
-    url: str, previous: Optional[str] = None
-) -> Optional[Tuple[str, str]]:
+def normalize_url(url: str, previous: Optional[str] = None) -> Optional[Tuple[str, str]]:
"""Splits and url into domain/path
Returns none if url is not valid"""
url_copy = url
30 changes: 7 additions & 23 deletions bibtexautocomplete/core/autocomplete.py
@@ -124,9 +124,7 @@ def __init__(
if ignore_mark:
self.filter = lambda x: x["ID"] in self.entries
else:
-self.filter = (
-    lambda x: x["ID"] in self.entries and MARKED_FIELD.lower() not in x
-)
+self.filter = lambda x: x["ID"] in self.entries and MARKED_FIELD.lower() not in x
self.escape_unicode = escape_unicode
self.fields_to_protect_uppercase = fields_to_protect_uppercase

@@ -159,9 +157,7 @@ def get_id_padding(self) -> int:
"""Return the max length of entries' ID
to use for pretty printing"""
max_id_padding = 40
-return min(
-    max((len(entry["ID"]) + 1 for entry in self), default=0), max_id_padding
-)
+return min(max((len(entry["ID"]) + 1 for entry in self), default=0), max_id_padding)

def autocomplete(self, no_progressbar: bool = False) -> None:
"""Main function that does all the work
@@ -209,26 +205,20 @@ def autocomplete(self, no_progressbar: bool = False) -> None:
if is_verbose:
bar.text = " ".join(thread_positions)
else:
-bar.text = (
-    f"Processed {position}/{nb_entries} entries, "
-    f"found {self.changed_fields} new fields"
-)
+bar.text = f"Processed {position}/{nb_entries} entries, " f"found {self.changed_fields} new fields"
if not step: # Some threads have not found data for current entry
condition.wait()
else: # update data for current entry
self.update_entry(entries[position], threads, position)
position += 1
logger.info(
"Modified {changed_entries} / {count_entries} entries"
", added {changed_fields} fields",
"Modified {changed_entries} / {count_entries} entries" ", added {changed_fields} fields",
changed_entries=self.changed_entries,
count_entries=self.count_entries(),
changed_fields=self.changed_fields,
)

-def update_entry(
-    self, entry: EntryType, threads: List[LookupThread], position: int
-) -> None:
+def update_entry(self, entry: EntryType, threads: List[LookupThread], position: int) -> None:
"""Reads all data the threads have found on a new entry,
and uses it to update the entry with new fields"""
changes: List[Changes] = []
@@ -246,11 +236,7 @@ def update_entry(

for field in new_fields:
# Filter which fields to add
-if not (
-    self.force_overwrite_all
-    or (field in self.force_overwrite)
-    or (not has_field(entry, field))
-):
+if not (self.force_overwrite_all or (field in self.force_overwrite) or (not has_field(entry, field))):
continue
bib_field = self.combine_field(results, field)
if bib_field is None:
@@ -280,9 +266,7 @@ def update_entry(
if self.mark:
entry[MARKED_FIELD] = datetime.today().strftime("%Y-%m-%d")

-def combine_field(
-    self, results: List[BibtexEntry], fieldname: FieldType
-) -> Optional[BibtexField[Any]]:
+def combine_field(self, results: List[BibtexEntry], fieldname: FieldType) -> Optional[BibtexField[Any]]:
"""Combine the values of a single field"""
fields = [entry.get_field(fieldname) for entry in results if fieldname in entry]
groups: List[Tuple[int, BibtexField[Any]]] = []
4 changes: 1 addition & 3 deletions bibtexautocomplete/core/data_dump.py
@@ -17,9 +17,7 @@ def __init__(self, id: str) -> None:
self.new_fields = 0
self.results = {}

-def add_entry(
-    self, lookup_name: str, entry: Optional[BibtexEntry], info: Dict[str, JSONType]
-) -> None:
+def add_entry(self, lookup_name: str, entry: Optional[BibtexEntry], info: Dict[str, JSONType]) -> None:
if entry is None:
self.results[lookup_name] = None
return
14 changes: 3 additions & 11 deletions bibtexautocomplete/core/main.py
@@ -91,11 +91,7 @@ def main(argv: Optional[List[str]] = None) -> None:

HTTPSLookup.connection_timeout = args.timeout if args.timeout > 0.0 else None
HTTPSLookup.ignore_ssl = args.ignore_ssl
-lookups = (
-    OnlyExclude[str]
-    .from_nonempty(args.only_query, args.dont_query)
-    .filter(LOOKUPS, lambda x: x.name)
-)
+lookups = OnlyExclude[str].from_nonempty(args.only_query, args.dont_query).filter(LOOKUPS, lambda x: x.name)
if args.only_query != []:
# remove duplicate from list
args.only_query, dups = list_unduplicate(args.only_query)
@@ -110,18 +106,14 @@ def main(argv: Optional[List[str]] = None) -> None:
if args.protect_all_uppercase:
fields_to_protect_uppercase: Container[str] = FieldNamesSet
else:
-fields_to_protect_proto = OnlyExclude[str].from_nonempty(
-    args.protect_uppercase, args.dont_protect_uppercase
-)
+fields_to_protect_proto = OnlyExclude[str].from_nonempty(args.protect_uppercase, args.dont_protect_uppercase)
fields_to_protect_proto.default = False
fields_to_protect_uppercase = fields_to_protect_proto

overwrite = OnlyExclude[str].from_nonempty(args.overwrite, args.dont_overwrite)
overwrite.default = False

-FieldConditionMixin.fields_to_complete = set(
-    fields.filter(SearchedFields, lambda x: x)
-)
+FieldConditionMixin.fields_to_complete = set(fields.filter(SearchedFields, lambda x: x))
FieldConditionMixin.overwrites = set(overwrite.filter(SearchedFields, lambda x: x))

if args.force_overwrite:
39 changes: 11 additions & 28 deletions bibtexautocomplete/core/parser.py
@@ -60,10 +60,9 @@ def indent_string(indent: str) -> str:
sane = indent.replace("t", "\t").replace("n", "\n").replace("_", " ")
if not (sane.isspace() or sane == ""):
logger.critical(
-    (
-        "--fi/--indent should be a number or string "
-        "with spaces, '_', 't' and 'n' only.\nGot: '{}'"
-    ).format(indent)
+    ("--fi/--indent should be a number or string " "with spaces, '_', 't' and 'n' only.\nGot: '{}'").format(
+        indent
+    )
)
exit(5)
return sane
@@ -108,36 +107,20 @@ def get_bibfiles(input: Path) -> List[Path]:

FIELD_NAMES = sorted(FieldNamesSet)

-parser.add_argument(
-    "--dont-query", "-Q", action="append", default=[], choices=LOOKUP_NAMES
-)
-parser.add_argument(
-    "--only-query", "-q", action="append", default=[], choices=LOOKUP_NAMES
-)
-parser.add_argument(
-    "--dont-complete", "-C", action="append", default=[], choices=FIELD_NAMES
-)
-parser.add_argument(
-    "--only-complete", "-c", action="append", default=[], choices=FIELD_NAMES
-)
-parser.add_argument(
-    "--dont-overwrite", "-W", action="append", default=[], choices=FIELD_NAMES
-)
-parser.add_argument(
-    "--overwrite", "-w", action="append", default=[], choices=FIELD_NAMES
-)
+parser.add_argument("--dont-query", "-Q", action="append", default=[], choices=LOOKUP_NAMES)
+parser.add_argument("--only-query", "-q", action="append", default=[], choices=LOOKUP_NAMES)
+parser.add_argument("--dont-complete", "-C", action="append", default=[], choices=FIELD_NAMES)
+parser.add_argument("--only-complete", "-c", action="append", default=[], choices=FIELD_NAMES)
+parser.add_argument("--dont-overwrite", "-W", action="append", default=[], choices=FIELD_NAMES)
+parser.add_argument("--overwrite", "-w", action="append", default=[], choices=FIELD_NAMES)

parser.add_argument("--exclude-entry", "-E", action="append", default=[])
parser.add_argument("--only-entry", "-e", action="append", default=[])

parser.add_argument("--escape-unicode", "--fu", action="store_true")
parser.add_argument("--protect-all-uppercase", "--fpa", action="store_true")
-parser.add_argument(
-    "--protect-uppercase", "--fp", action="append", default=[], choices=FIELD_NAMES
-)
-parser.add_argument(
-    "--dont-protect-uppercase", "--FP", action="append", default=[], choices=FIELD_NAMES
-)
+parser.add_argument("--protect-uppercase", "--fp", action="append", default=[], choices=FIELD_NAMES)
+parser.add_argument("--dont-protect-uppercase", "--FP", action="append", default=[], choices=FIELD_NAMES)

parser.add_argument("--align-values", "--fa", action="store_true")
parser.add_argument("--comma-first", "--fc", action="store_true")
4 changes: 1 addition & 3 deletions bibtexautocomplete/lookups/abstract_entry_lookup.py
@@ -28,9 +28,7 @@ def __init__(self, input: BibtexEntry) -> None:
self.entry = input


-class FieldConditionMixin(
-    ConditionMixin["BibtexEntry", "BibtexEntry"], AbstractEntryLookup
-):
+class FieldConditionMixin(ConditionMixin["BibtexEntry", "BibtexEntry"], AbstractEntryLookup):
"""Mixin used to query only if there exists a field in self.fields
that does not exists in self.entry, or is in self.overwrites
8 changes: 2 additions & 6 deletions bibtexautocomplete/lookups/https.py
@@ -166,17 +166,13 @@ def get_data(self) -> Optional[Data]:
reason=" " + self.response.reason if self.response.reason else "",
delay=delay,
)
-logger.very_verbose_debug(
-    "response headers: {headers}", headers=self.response.headers
-)
+logger.very_verbose_debug("response headers: {headers}", headers=self.response.headers)
data = self.response.read()
connection.close()
except timeout:
if self.silent_fail:
return None
-logger.warn(
-    "connection timeout ({timeout}s)", timeout=self.connection_timeout
-)
+logger.warn("connection timeout ({timeout}s)", timeout=self.connection_timeout)
TIMEOUT_Hint.emit()
return None
except (gaierror, OSError) as err:
4 changes: 1 addition & 3 deletions bibtexautocomplete/lookups/multiple_mixin.py
@@ -38,9 +38,7 @@ def query(self) -> Optional[Output]:
return None


-class DAT_Query_Mixin(
-    MultipleQueryMixin[BibtexEntry, BibtexEntry], AbstractEntryLookup
-):
+class DAT_Query_Mixin(MultipleQueryMixin[BibtexEntry, BibtexEntry], AbstractEntryLookup):
"""Performs queries using
- the entry's DOI if it is known and if query_doi is True
- the entry's title and author if known and if query_author_title is True
4 changes: 1 addition & 3 deletions bibtexautocomplete/utils/functions.py
@@ -17,9 +17,7 @@ def list_unduplicate(lst: List[T]) -> Tuple[List[T], Set[T]]:
return unique, dups


-def list_sort_using(
-    to_sort: Iterable[Q], reference: List[T], map: Callable[[Q], T]
-) -> List[Q]:
+def list_sort_using(to_sort: Iterable[Q], reference: List[T], map: Callable[[Q], T]) -> List[Q]:
"""Sorts to_sort based on the order in reference, using map for conversion"""
order = {q: i for i, q in enumerate(reference)}
return sorted(to_sort, key=lambda t: order[map(t)])
26 changes: 6 additions & 20 deletions bibtexautocomplete/utils/logger.py
@@ -81,9 +81,7 @@ def to_logger(self, level: int, message: str, *args: Any, **kwargs: Any) -> None
message = self.add_thread_info(ansi_format(message, *args, **kwargs))
self.logger.log(level=level, msg=message)

-def warn(
-    self, message: str, error: str = "WARNING", *args: Any, **kwargs: Any
-) -> None:
+def warn(self, message: str, error: str = "WARNING", *args: Any, **kwargs: Any) -> None:
"""Issue a warning, extra arguments are formatter options"""
self.to_logger(
logging.WARN,
@@ -93,21 +91,17 @@
**kwargs,
)

-def error(
-    self, message: str, error: str = "ERROR", *args: Any, **kwargs: Any
-) -> None:
+def error(self, message: str, error: str = "ERROR", *args: Any, **kwargs: Any) -> None:
"""Issue an error, extra arguments are formatter options"""
self.to_logger(
logging.ERROR,
"{FgRed}{error}:{Reset} " + message,
-error=error,
*args,
+error=error,
**kwargs,
)

-def critical(
-    self, message: str, error: str = "CRITICAL ERROR", *args: Any, **kwargs: Any
-) -> None:
+def critical(self, message: str, error: str = "CRITICAL ERROR", *args: Any, **kwargs: Any) -> None:
"""Issue a critical error, extra arguments are formatter options"""
self.to_logger(
logging.CRITICAL,
@@ -178,13 +172,7 @@ def get_level(self) -> int:
def header(self, title: str, level: Level = logging.INFO) -> None:
"""Shows a pretty header, 100% inspired by opam's output"""
self.to_logger(level, "") # newline
-title = (
-    "{FgBlue}===={Reset} {StBold}"
-    + title
-    + "{Reset} {FgBlue}"
-    + ("=" * (74 - len(title)))
-    + "{Reset}"
-)
+title = "{FgBlue}===={Reset} {StBold}" + title + "{Reset} {FgBlue}" + ("=" * (74 - len(title))) + "{Reset}"
self.to_logger(level, title)

def traceback(self, message: str, _err: Exception) -> None:
@@ -212,9 +200,7 @@ def traceback(self, message: str, _err: Exception) -> None:
error="UNEXPECTED ERROR",
tmessage=message,
ISSUES_URL=ISSUES_URL,
-exn=format_exc()
-.strip()
-.replace("\n", "\n" + prefix + ansi_format("{FgRed}")),
+exn=format_exc().strip().replace("\n", "\n" + prefix + ansi_format("{FgRed}")),
)


4 changes: 1 addition & 3 deletions bibtexautocomplete/utils/safe_json.py
@@ -39,9 +39,7 @@ def __getitem__(self, key: Union[int, str]) -> "SafeJSON":
else:
log("SafeJSON: dict has no key {}", key)
elif self.value is not None:
-log(
-    "SafeJSON: access to {} on non-dict {}", repr(key), type(self.value)
-)
+log("SafeJSON: access to {} on non-dict {}", repr(key), type(self.value))
return SafeJSON(result)

@staticmethod