Skip to content

Commit

Permalink
Merge development into master
Browse files Browse the repository at this point in the history
  • Loading branch information
github-actions[bot] authored Jul 11, 2023
2 parents 77f3ff8 + dd9ce4d commit 3c2f940
Show file tree
Hide file tree
Showing 23 changed files with 4,726 additions and 119 deletions.
2 changes: 1 addition & 1 deletion bazarr/sonarr/sync/episodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def sync_episodes(series_id=None, send_event=True):
episodeFiles = get_episodesFiles_from_sonarr_api(url=url_sonarr(), apikey_sonarr=apikey_sonarr,
series_id=seriesId['id'])
for episode in episodes:
if episode['hasFile']:
if episodeFiles and episode['hasFile']:
item = [x for x in episodeFiles if x['id'] == episode['episodeFileId']]
if item:
episode['episodeFile'] = item[0]
Expand Down
2 changes: 1 addition & 1 deletion bazarr/subtitles/indexer/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from guess_language import guess_language
from subliminal_patch import core
from subzero.language import Language
from chardet import detect
from charset_normalizer import detect

from app.config import settings
from constants import hi_regex
Expand Down
6 changes: 6 additions & 0 deletions bazarr/subtitles/upgrade.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,9 @@ def parse_language_string(language_string):


def get_upgradable_episode_subtitles():
if not settings.general.getboolean('upgrade_subs'):
return []

minimum_timestamp, query_actions = get_queries_condition_parameters()

upgradable_episodes_conditions = [(TableHistory.action << query_actions),
Expand Down Expand Up @@ -225,6 +228,9 @@ def get_upgradable_episode_subtitles():


def get_upgradable_movies_subtitles():
if not settings.general.getboolean('upgrade_subs'):
return []

minimum_timestamp, query_actions = get_queries_condition_parameters()

upgradable_movies_conditions = [(TableHistoryMovie.action << query_actions),
Expand Down
2 changes: 1 addition & 1 deletion bazarr/utilities/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import logging
import hashlib

from chardet import detect
from charset_normalizer import detect
from bs4 import UnicodeDammit

from app.config import settings
Expand Down
1 change: 1 addition & 0 deletions bazarr/utilities/video_analyzer.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ def _handle_alpha3(detected_language: dict):

return alpha3


def embedded_subs_reader(file, file_size, episode_file_id=None, movie_file_id=None, use_cache=True):
data = parse_video_metadata(file, file_size, episode_file_id, movie_file_id, use_cache=use_cache)
und_default_language = alpha3_from_alpha2(settings.general.default_und_embedded_subtitles_lang)
Expand Down
4 changes: 4 additions & 0 deletions frontend/src/pages/Settings/Subtitles/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,10 @@ const commandOptions: CommandOption[] = [
option: "subtitle_id",
description: "Provider ID of the subtitle file",
},
{
option: "provider",
description: "Provider of the subtitle file",
},
{
option: "series_id",
description: "Sonarr series ID (Empty if movie)",
Expand Down
2 changes: 1 addition & 1 deletion frontend/src/pages/views/ItemOverview.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ const ItemOverview: FunctionComponent<Props> = (props) => {
<Text inherit color="white">
<Box component="span" mr={12}>
<FontAwesomeIcon
title={item?.monitored ? "unmonitored" : "monitored"}
title={item?.monitored ? "monitored" : "unmonitored"}
icon={item?.monitored ? faBookmark : farBookmark}
></FontAwesomeIcon>
</Box>
Expand Down
45 changes: 45 additions & 0 deletions libs/charset_normalizer/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# -*- coding: utf-8 -*-
"""
Charset-Normalizer
~~~~~~~~~~~~~~~~~~
The Real First Universal Charset Detector.
A library that helps you read text from an unknown charset encoding.
Motivated by chardet, this package tries to resolve the issue by taking a new approach.
All IANA character set names for which the Python core library provides codecs are supported.

Basic usage:

>>> from charset_normalizer import from_bytes
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
>>> best_guess = results.best()
>>> str(best_guess)
'Bсеки човек има право на образование. Oбразованието!'

Other methods and usages are available - see the full documentation
at <https://github.com/Ousret/charset_normalizer>.

:copyright: (c) 2021 by Ahmed TAHRI
:license: MIT, see LICENSE for more details.
"""
import logging

# Re-export the public API surface: the detection entry points (from_bytes /
# from_fp / from_path), the chardet-compatible ``detect`` shim from .legacy,
# the result container types, and the logging/version helpers.
from .api import from_bytes, from_fp, from_path
from .legacy import detect
from .models import CharsetMatch, CharsetMatches
from .utils import set_logging_handler
from .version import VERSION, __version__

# Names exported by ``from charset_normalizer import *``.
__all__ = (
    "from_fp",
    "from_path",
    "from_bytes",
    "detect",
    "CharsetMatch",
    "CharsetMatches",
    "__version__",
    "VERSION",
    "set_logging_handler",
)

# Attach a NullHandler to the top level logger by default, so the library
# emits nothing unless the embedding application configures logging itself.
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
Loading

0 comments on commit 3c2f940

Please sign in to comment.